Commit 4958d3a4 authored by Kenton Varda

Improve heap allocation.

parent f9ee42ce
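The diff below renames kj's array-allocation helper (kj::heapArray() replaces kj::newArray()), routes it through a new HeapArrayDisposer, and reworks Disposer/Own<T>/heap() around a disposeImpl() hook. As a quick orientation, here is a minimal usage sketch -- hypothetical code, not part of this commit, assuming the usual <kj/array.h> include path:

// Sketch only -- not part of this commit.  Exercises the renamed helpers
// (kj::heapArray<T>() in place of kj::newArray<T>()) plus kj::heapArrayBuilder,
// both of which appear throughout the diff and its tests.
#include <cstddef>
#include <kj/array.h>

int exampleUsage() {
  // Allocate an owned, heap-backed array of 16 ints; it frees itself on scope exit.
  kj::Array<int> numbers = kj::heapArray<int>(16);
  for (size_t i = 0; i < numbers.size(); i++) {
    numbers[i] = static_cast<int>(i);
  }

  // Build exactly 5 chars incrementally; finish() hands ownership to an Array<char>.
  const char* src = "foo";
  kj::ArrayBuilder<char> builder = kj::heapArrayBuilder<char>(5);
  builder.add('<');
  builder.addAll(src, src + 3);
  builder.add('>');
  kj::Array<char> text = builder.finish();

  return numbers[3] + static_cast<int>(text.size());
}

The new kj array test added later in this diff exercises the same calls in more detail, including the exception-safety guarantees.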
@@ -95,7 +95,7 @@ TextBlob::TextBlob(Params&&... params) {
}

TextBlob::TextBlob(kj::Array<TextBlob>&& params) {
-  branches = kj::newArray<Branch>(params.size());
+  branches = kj::heapArray<Branch>(params.size());
  for (size_t i = 0; i < params.size(); i++) {
    branches[i].pos = nullptr;
    branches[i].content = kj::mv(params[i]);
@@ -113,8 +113,8 @@ void TextBlob::writeTo(kj::OutputStream& out) const {
}

void TextBlob::allocate(size_t textSize, size_t branchCount) {
-  text = kj::newArray<char>(textSize);
-  branches = kj::newArray<Branch>(branchCount);
+  text = kj::heapArray<char>(textSize);
+  branches = kj::heapArray<Branch>(branchCount);
}

template <typename First, typename... Rest>
@@ -160,7 +160,7 @@ TextBlob text(Params&&... params) {
template <typename List, typename Func>
TextBlob forText(List&& list, Func&& func) {
-  kj::Array<TextBlob> items = kj::newArray<TextBlob>(list.size());
+  kj::Array<TextBlob> items = kj::heapArray<TextBlob>(list.size());
  for (size_t i = 0; i < list.size(); i++) {
    items[i] = func(list[i]);
  }
...
@@ -1096,7 +1096,7 @@ internal::RawSchema* SchemaLoader::Impl::tryGet(uint64_t typeId) const {
}

kj::Array<Schema> SchemaLoader::Impl::getAllLoaded() const {
-  kj::Array<Schema> result = kj::newArray<Schema>(schemas.size());
+  kj::Array<Schema> result = kj::heapArray<Schema>(schemas.size());
  size_t i = 0;
  for (auto& schema: schemas) {
    result[i++] = Schema(schema.second);
...
@@ -25,6 +25,7 @@
#define CAPNPROTO_SCHEMA_LOADER_H_

#include "schema.h"
+#include <kj/memory.h>

namespace capnproto {
...
@@ -61,7 +61,7 @@ private:
SnappyInputStream::SnappyInputStream(BufferedInputStream& inner, kj::ArrayPtr<byte> buffer)
    : inner(inner) {
  if (buffer.size() < SNAPPY_BUFFER_SIZE) {
-    ownedBuffer = kj::newArray<byte>(SNAPPY_BUFFER_SIZE);
+    ownedBuffer = kj::heapArray<byte>(SNAPPY_BUFFER_SIZE);
    buffer = ownedBuffer;
  }
  this->buffer = buffer;
@@ -125,14 +125,14 @@ SnappyOutputStream::SnappyOutputStream(
                "snappy::MaxCompressedLength() changed?");

  if (buffer.size() < SNAPPY_BUFFER_SIZE) {
-    ownedBuffer = kj::newArray<byte>(SNAPPY_BUFFER_SIZE);
+    ownedBuffer = kj::heapArray<byte>(SNAPPY_BUFFER_SIZE);
    buffer = ownedBuffer;
  }
  this->buffer = buffer;
  bufferPos = buffer.begin();

  if (compressedBuffer.size() < SNAPPY_COMPRESSED_BUFFER_SIZE) {
-    ownedCompressedBuffer = kj::newArray<byte>(SNAPPY_COMPRESSED_BUFFER_SIZE);
+    ownedCompressedBuffer = kj::heapArray<byte>(SNAPPY_COMPRESSED_BUFFER_SIZE);
    compressedBuffer = ownedCompressedBuffer;
  }
  this->compressedBuffer = compressedBuffer;
...
@@ -290,7 +290,7 @@ TEST(Serialize, FileDescriptors) {
}

TEST(Serialize, RejectTooManySegments) {
-  kj::Array<word> data = kj::newArray<word>(8192);
+  kj::Array<word> data = kj::heapArray<word>(8192);
  WireValue<uint32_t>* table = reinterpret_cast<WireValue<uint32_t>*>(data.begin());
  table[0].set(1024);
  for (uint i = 0; i < 1024; i++) {
...
@@ -61,7 +61,7 @@ FlatArrayMessageReader::FlatArrayMessageReader(
  offset += segmentSize;

  if (segmentCount > 1) {
-    moreSegments = kj::newArray<kj::ArrayPtr<const word>>(segmentCount - 1);
+    moreSegments = kj::heapArray<kj::ArrayPtr<const word>>(segmentCount - 1);

    for (uint i = 1; i < segmentCount; i++) {
      uint segmentSize = table[i + 1].get();
@@ -96,7 +96,7 @@ kj::Array<word> messageToFlatArray(kj::ArrayPtr<const kj::ArrayPtr<const word>>
    totalSize += segment.size();
  }

-  kj::Array<word> result = kj::newArray<word>(totalSize);
+  kj::Array<word> result = kj::heapArray<word>(totalSize);

  internal::WireValue<uint32_t>* table =
      reinterpret_cast<internal::WireValue<uint32_t>*>(result.begin());
@@ -170,14 +170,14 @@ InputStreamMessageReader::InputStreamMessageReader(
  if (scratchSpace.size() < totalWords) {
    // TODO(perf): Consider allocating each segment as a separate chunk to reduce memory
    //   fragmentation.
-    ownedSpace = kj::newArray<word>(totalWords);
+    ownedSpace = kj::heapArray<word>(totalWords);
    scratchSpace = ownedSpace;
  }

  segment0 = scratchSpace.slice(0, segment0Size);

  if (segmentCount > 1) {
-    moreSegments = kj::newArray<kj::ArrayPtr<const word>>(segmentCount - 1);
+    moreSegments = kj::heapArray<kj::ArrayPtr<const word>>(segmentCount - 1);

    size_t offset = segment0Size;
    for (uint i = 0; i < segmentCount - 1; i++) {
...
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "array.h"
#include <gtest/gtest.h>
#include "logging.h"
#include <string>
#include <list>
namespace kj {
namespace {
struct TestObject {
TestObject() {
index = count;
CHECK(index != throwAt);
++count;
}
TestObject(const TestObject& other) {
CHECK(other.index != throwAt);
index = -1;
copiedCount++;
}
~TestObject() noexcept(false) {
if (index == -1) {
--copiedCount;
} else {
--count;
EXPECT_EQ(index, count);
CHECK(count != throwAt);
}
}
int index;
static int count;
static int copiedCount;
static int throwAt;
};
int TestObject::count = 0;
int TestObject::copiedCount = 0;
int TestObject::throwAt = -1;
struct TestNoexceptObject {
TestNoexceptObject() noexcept {
index = count;
++count;
}
TestNoexceptObject(const TestNoexceptObject& other) noexcept {
index = -1;
copiedCount++;
}
~TestNoexceptObject() noexcept {
if (index == -1) {
--copiedCount;
} else {
--count;
EXPECT_EQ(index, count);
}
}
int index;
static int count;
static int copiedCount;
};
int TestNoexceptObject::count = 0;
int TestNoexceptObject::copiedCount = 0;
TEST(Array, TrivialConstructor) {
char* ptr;
{
Array<char> chars = heapArray<char>(32);
ptr = chars.begin();
chars[0] = 12;
chars[1] = 34;
}
{
Array<char> chars = heapArray<char>(32);
// Somewhat hacky: We can't guarantee that the new array is allocated in the same place, but
// any reasonable allocator is highly likely to do so. If it does, then we expect that the
// memory has not been initialized.
if (chars.begin() == ptr) {
EXPECT_NE(chars[0], 0);
EXPECT_NE(chars[1], 0);
}
}
}
TEST(Array, ComplexConstructor) {
TestObject::count = 0;
TestObject::throwAt = -1;
{
Array<TestObject> array = heapArray<TestObject>(32);
EXPECT_EQ(32, TestObject::count);
}
EXPECT_EQ(0, TestObject::count);
}
TEST(Array, ThrowingConstructor) {
TestObject::count = 0;
TestObject::throwAt = 16;
// If a constructor throws, the previous elements should still be destroyed.
EXPECT_ANY_THROW(heapArray<TestObject>(32));
EXPECT_EQ(0, TestObject::count);
}
TEST(Array, ThrowingDestructor) {
TestObject::count = 0;
TestObject::throwAt = -1;
Array<TestObject> array = heapArray<TestObject>(32);
EXPECT_EQ(32, TestObject::count);
// If a destructor throws, all elements should still be destroyed.
TestObject::throwAt = 16;
EXPECT_ANY_THROW(array = nullptr);
EXPECT_EQ(0, TestObject::count);
}
TEST(Array, ArrayBuilder) {
TestObject::count = 0;
TestObject::throwAt = -1;
Array<TestObject> array;
{
ArrayBuilder<TestObject> builder = heapArrayBuilder<TestObject>(32);
for (uint i = 0; i < 32; i++) {
EXPECT_EQ(i, TestObject::count);
builder.add();
}
EXPECT_EQ(32, TestObject::count);
array = builder.finish();
EXPECT_EQ(32, TestObject::count);
}
EXPECT_EQ(32, TestObject::count);
array = nullptr;
EXPECT_EQ(0, TestObject::count);
}
TEST(Array, ArrayBuilderAddAll) {
{
// Trivial case.
char text[] = "foo";
ArrayBuilder<char> builder = heapArrayBuilder<char>(5);
builder.add('<');
builder.addAll(text, text + 3);
builder.add('>');
auto array = builder.finish();
EXPECT_EQ("<foo>", std::string(array.begin(), array.end()));
}
{
// Trivial case, const.
const char* text = "foo";
ArrayBuilder<char> builder = heapArrayBuilder<char>(5);
builder.add('<');
builder.addAll(text, text + 3);
builder.add('>');
auto array = builder.finish();
EXPECT_EQ("<foo>", std::string(array.begin(), array.end()));
}
{
// Trivial case, non-pointer iterator.
std::list<char> text = {'f', 'o', 'o'};
ArrayBuilder<char> builder = heapArrayBuilder<char>(5);
builder.add('<');
builder.addAll(text);
builder.add('>');
auto array = builder.finish();
EXPECT_EQ("<foo>", std::string(array.begin(), array.end()));
}
{
// Complex case.
std::string strs[] = {"foo", "bar", "baz"};
ArrayBuilder<std::string> builder = heapArrayBuilder<std::string>(5);
builder.add("qux");
builder.addAll(strs, strs + 3);
builder.add("quux");
auto array = builder.finish();
EXPECT_EQ("qux", array[0]);
EXPECT_EQ("foo", array[1]);
EXPECT_EQ("bar", array[2]);
EXPECT_EQ("baz", array[3]);
EXPECT_EQ("quux", array[4]);
}
{
// Complex case, noexcept.
TestNoexceptObject::count = 0;
TestNoexceptObject::copiedCount = 0;
TestNoexceptObject objs[3];
EXPECT_EQ(3, TestNoexceptObject::count);
EXPECT_EQ(0, TestNoexceptObject::copiedCount);
ArrayBuilder<TestNoexceptObject> builder = heapArrayBuilder<TestNoexceptObject>(3);
EXPECT_EQ(3, TestNoexceptObject::count);
EXPECT_EQ(0, TestNoexceptObject::copiedCount);
builder.addAll(objs, objs + 3);
EXPECT_EQ(3, TestNoexceptObject::count);
EXPECT_EQ(3, TestNoexceptObject::copiedCount);
auto array = builder.finish();
EXPECT_EQ(3, TestNoexceptObject::count);
EXPECT_EQ(3, TestNoexceptObject::copiedCount);
}
EXPECT_EQ(0, TestNoexceptObject::count);
EXPECT_EQ(0, TestNoexceptObject::copiedCount);
{
// Complex case, exceptions possible.
TestObject::count = 0;
TestObject::copiedCount = 0;
TestObject::throwAt = -1;
TestObject objs[3];
EXPECT_EQ(3, TestObject::count);
EXPECT_EQ(0, TestObject::copiedCount);
ArrayBuilder<TestObject> builder = heapArrayBuilder<TestObject>(3);
EXPECT_EQ(3, TestObject::count);
EXPECT_EQ(0, TestObject::copiedCount);
builder.addAll(objs, objs + 3);
EXPECT_EQ(3, TestObject::count);
EXPECT_EQ(3, TestObject::copiedCount);
auto array = builder.finish();
EXPECT_EQ(3, TestObject::count);
EXPECT_EQ(3, TestObject::copiedCount);
}
EXPECT_EQ(0, TestObject::count);
EXPECT_EQ(0, TestObject::copiedCount);
{
// Complex case, exceptions occur.
TestObject::count = 0;
TestObject::copiedCount = 0;
TestObject::throwAt = -1;
TestObject objs[3];
EXPECT_EQ(3, TestObject::count);
EXPECT_EQ(0, TestObject::copiedCount);
TestObject::throwAt = 1;
ArrayBuilder<TestObject> builder = heapArrayBuilder<TestObject>(3);
EXPECT_EQ(3, TestObject::count);
EXPECT_EQ(0, TestObject::copiedCount);
EXPECT_ANY_THROW(builder.addAll(objs, objs + 3));
TestObject::throwAt = -1;
EXPECT_EQ(3, TestObject::count);
EXPECT_EQ(0, TestObject::copiedCount);
}
EXPECT_EQ(0, TestObject::count);
EXPECT_EQ(0, TestObject::copiedCount);
}
} // namespace
} // namespace kj
@@ -22,7 +22,87 @@
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "array.h"
#include <iostream>

namespace kj {
ArrayDisposer::~ArrayDisposer() {}
namespace internal {
struct HeapArrayDisposer::ExceptionGuard {
byte* pos;
size_t elementSize;
size_t elementCount;
size_t constructedCount;
void (*destroyElement)(void*);
ExceptionGuard(void* ptr, size_t elementSize, size_t elementCount,
void (*destroyElement)(void*))
: pos(reinterpret_cast<byte*>(ptr) + elementSize * elementCount),
elementSize(elementSize), elementCount(elementCount),
destroyElement(destroyElement) {}
~ExceptionGuard() {
if (pos != nullptr) {
destroyAll();
operator delete(pos);
}
}
void destroyAll() {
while (elementCount > 0) {
pos -= elementSize;
--elementCount;
destroyElement(pos);
}
}
};
void* HeapArrayDisposer::allocateImpl(size_t elementSize, size_t elementCount, size_t capacity,
void (*constructElement)(void*),
void (*destroyElement)(void*)) {
void* result = operator new(elementSize * capacity);
if (constructElement == nullptr) {
// Nothing to do.
} else if (destroyElement == nullptr) {
byte* pos = reinterpret_cast<byte*>(result);
while (elementCount > 0) {
constructElement(pos);
pos += elementSize;
--elementCount;
}
} else {
ExceptionGuard guard(result, elementSize, 0, destroyElement);
while (guard.elementCount < elementCount) {
constructElement(guard.pos);
guard.pos += elementSize;
++guard.elementCount;
}
guard.pos = nullptr;
}
return result;
}
void HeapArrayDisposer::disposeImpl(
void* firstElement, size_t elementSize, size_t elementCount, size_t capacity,
void (*destroyElement)(void*)) const {
// Note that capacity is ignored since operator delete() doesn't care about it.
if (destroyElement == nullptr) {
operator delete(firstElement);
} else {
ExceptionGuard guard(firstElement, elementSize, elementCount, destroyElement);
guard.destroyAll();
// If an exception is thrown, we'll continue the destruction process in ExceptionGuard's
// destructor. If _that_ throws an exception, the program terminates according to C++ rules.
}
}
const HeapArrayDisposer HeapArrayDisposer::instance = HeapArrayDisposer();
} // namespace internal
} // namespace kj
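HeapArrayDisposer::allocateImpl() above builds elements one at a time inside an ExceptionGuard, so that a throwing constructor destroys the already-built elements in reverse order and releases the block before the exception propagates. A rough standalone sketch of that construction pattern, using placement new instead of kj's type-erased function pointers (hypothetical helper, not part of the commit):

#include <cstddef>
#include <new>

// Hypothetical helper (not kj code): construct `count` default-constructed Ts
// into caller-provided raw storage, rolling back if any constructor throws.
// kj's ExceptionGuard does the same bookkeeping via function pointers and
// additionally frees the block it allocated itself; here the caller owns `raw`.
template <typename T>
T* constructAll(void* raw, std::size_t count) {
  T* elements = static_cast<T*>(raw);
  std::size_t constructed = 0;
  try {
    for (; constructed < count; ++constructed) {
      new (elements + constructed) T();  // may throw
    }
  } catch (...) {
    // Destroy the elements that were successfully built, newest first,
    // then let the exception continue to propagate.
    while (constructed > 0) {
      elements[--constructed].~T();
    }
    throw;
  }
  return elements;
}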
(One file's diff is collapsed and not shown here.)
@@ -145,7 +145,7 @@ void inlinePreconditionFailure(
  bool name##_isOnStack = name##_size <= (minStack); \
  type name##_stack[minStack]; \
  ::kj::Array<type> name##_heap = name##_isOnStack ? \
-      nullptr : kj::newArray<type>(name##_size); \
+      nullptr : kj::heapArray<type>(name##_size); \
  ::kj::ArrayPtr<type> name = name##_isOnStack ? \
      kj::arrayPtr(name##_stack, name##_size) : name##_heap
#else
@@ -154,7 +154,7 @@ void inlinePreconditionFailure(
  bool name##_isOnStack = name##_size <= (maxStack); \
  type name##_stack[name##_isOnStack ? size : 0]; \
  ::kj::Array<type> name##_heap = name##_isOnStack ? \
-      nullptr : kj::newArray<type>(name##_size); \
+      nullptr : kj::heapArray<type>(name##_size); \
  ::kj::ArrayPtr<type> name = name##_isOnStack ? \
      kj::arrayPtr(name##_stack, name##_size) : name##_heap
#endif
...
@@ -98,7 +98,7 @@ const char* Exception::what() const noexcept {
    }
  }

-  Array<Array<char>> contextText = newArray<Array<char>>(contextDepth);
+  Array<Array<char>> contextText = heapArray<Array<char>>(contextDepth);

  contextDepth = 0;
  contextPtr = &context;
...
@@ -25,6 +25,7 @@
#define KJ_EXCEPTION_H_

#include <exception>
+#include "memory.h"
#include "array.h"

namespace kj {
...
@@ -52,7 +52,7 @@ void OutputStream::write(ArrayPtr<const ArrayPtr<const byte>> pieces) {
// =======================================================================================

BufferedInputStreamWrapper::BufferedInputStreamWrapper(InputStream& inner, ArrayPtr<byte> buffer)
-    : inner(inner), ownedBuffer(buffer == nullptr ? newArray<byte>(8192) : nullptr),
+    : inner(inner), ownedBuffer(buffer == nullptr ? heapArray<byte>(8192) : nullptr),
      buffer(buffer == nullptr ? ownedBuffer : buffer) {}

BufferedInputStreamWrapper::~BufferedInputStreamWrapper() {}

@@ -118,7 +118,7 @@ void BufferedInputStreamWrapper::skip(size_t bytes) {
BufferedOutputStreamWrapper::BufferedOutputStreamWrapper(OutputStream& inner, ArrayPtr<byte> buffer)
    : inner(inner),
-      ownedBuffer(buffer == nullptr ? newArray<byte>(8192) : nullptr),
+      ownedBuffer(buffer == nullptr ? heapArray<byte>(8192) : nullptr),
      buffer(buffer == nullptr ? ownedBuffer : buffer),
      bufferPos(this->buffer.begin()) {}
...
@@ -157,7 +157,7 @@ static Array<char> makeDescription(DescriptionStyle style, const char* code, int
    totalSize += argValues[i].size();
  }

-  ArrayBuilder<char> result(totalSize);
+  ArrayBuilder<char> result = heapArrayBuilder<char>(totalSize);

  switch (style) {
    case LOG:
...
@@ -29,27 +29,40 @@
namespace kj {

// =======================================================================================
+// Disposer -- Implementation details.

class Disposer {
-  // Abstract interface for a thing that disposes of some other object. Often, it makes sense to
-  // decouple an object from the knowledge of how to dispose of it.
+  // Abstract interface for a thing that "disposes" of objects, where "disposing" usually means
+  // calling the destructor followed by freeing the underlying memory. `Own<T>` encapsulates an
+  // object pointer with corresponding Disposer.
+  //
+  // Few developers will ever touch this interface. It is primarily useful for those implementing
+  // custom memory allocators.

protected:
  virtual ~Disposer();

+  virtual void disposeImpl(void* pointer) const = 0;
+  // Disposes of the object, given a pointer to the beginning of the object. If the object is
+  // polymorphic, this pointer is determined by dynamic_cast<void*>(). For non-polymorphic types,
+  // Own<T> does not allow any casting, so the pointer exactly matches the original one given to
+  // Own<T>.
+
public:
-  virtual void dispose(void* interiorPointer) = 0;
-  // Disposes of the object that this Disposer owns, and possibly disposes of the disposer itself.
-  //
-  // Callers must assume that the Disposer itself is no longer valid once this returns -- e.g. it
-  // might delete itself. Callers must in particular be sure not to call the Disposer again even
-  // when dispose() throws an exception.
-  //
-  // `interiorPointer` points somewhere inside of the object -- NOT necessarily at the beginning,
-  // especially in the presence of multiple inheritance. Most implementations should ignore the
-  // pointer, though a tricky memory allocator could get away with sharing one Disposer among
-  // multiple objects if it can figure out how to find the beginning of the object given an
-  // arbitrary interior pointer.
+  template <typename T>
+  void dispose(T* object) const;
+  // Helper wrapper around disposeImpl().
+  //
+  // If T is polymorphic, calls `disposeImpl(dynamic_cast<void*>(object))`, otherwise calls
+  // `disposeImpl(upcast<void*>(object))`.
+  //
+  // Callers must not call dispose() on the same pointer twice, even if the first call throws
+  // an exception.
+
+private:
+  template <typename T, bool polymorphic = __is_polymorphic(T)>
+  struct Dispose_;
};

// =======================================================================================
@@ -64,11 +77,12 @@ class Own {
  // This is much like std::unique_ptr, except:
  // - You cannot release(). An owned object is not necessarily allocated with new (see next
  //   point), so it would be hard to use release() correctly.
-  // - The deleter is made polymorphic by virtual call rather than by template. This is a much
-  //   more powerful default -- it allows any random module to decide to use a custom allocator.
-  //   This could be accomplished with unique_ptr by forcing everyone to use e.g.
-  //   std::unique_ptr<T, kj::Disposer&>, but at that point we've lost basically any benefit
-  //   of interoperating with std::unique_ptr anyway.
+  // - The deleter is made polymorphic by virtual call rather than by template. This is much
+  //   more powerful -- it allows the use of custom allocators, freelists, etc. This could
+  //   _almost_ be accomplished with unique_ptr by forcing everyone to use something like
+  //   std::unique_ptr<T, kj::Deleter>, except that things get hairy in the presence of multiple
+  //   inheritance and upcasting, and anyway if you force everyone to use a custom deleter
+  //   then you've lost any benefit to interoperating with the "standard" unique_ptr.

public:
  Own(const Own& other) = delete;
@@ -76,8 +90,12 @@ public:
      : disposer(other.disposer), ptr(other.ptr) { other.ptr = nullptr; }
  template <typename U>
  inline Own(Own<U>&& other) noexcept
-      : disposer(other.disposer), ptr(other.ptr) { other.ptr = nullptr; }
-  inline Own(T* ptr, Disposer* disposer) noexcept: disposer(disposer), ptr(ptr) {}
+      : disposer(other.disposer), ptr(other.ptr) {
+    static_assert(__is_polymorphic(T),
+        "Casting owned pointers requires that the target type is polymorphic.");
+    other.ptr = nullptr;
+  }
+  inline Own(T* ptr, const Disposer& disposer) noexcept: disposer(&disposer), ptr(ptr) {}

  ~Own() noexcept { dispose(); }
@@ -99,13 +117,13 @@ public:
  inline operator const T*() const { return ptr; }

private:
-  Disposer* disposer;  // Only valid if ptr != nullptr.
+  const Disposer* disposer;  // Only valid if ptr != nullptr.
  T* ptr;

  inline void dispose() {
    // Make sure that if an exception is thrown, we are left with a null ptr, so we won't possibly
    // dispose again.
-    void* ptrCopy = ptr;
+    T* ptrCopy = ptr;
    if (ptrCopy != nullptr) {
      ptr = nullptr;
      disposer->dispose(ptrCopy);
@@ -116,26 +134,50 @@ private:
namespace internal {

template <typename T>
-class HeapValue final: public Disposer {
+class HeapDisposer final: public Disposer {
public:
-  template <typename... Params>
-  inline HeapValue(Params&&... params): value(kj::fwd<Params>(params)...) {}
-
-  virtual void dispose(void*) override { delete this; }
-
-  T value;
+  virtual void disposeImpl(void* pointer) const override { delete reinterpret_cast<T*>(pointer); }
+
+  static const HeapDisposer instance;
};

+template <typename T>
+const HeapDisposer<T> HeapDisposer<T>::instance = HeapDisposer<T>();
+
} // namespace internal

template <typename T, typename... Params>
Own<T> heap(Params&&... params) {
  // heap<T>(...) allocates a T on the heap, forwarding the parameters to its constructor. The
  // exact heap implementation is unspecified -- for now it is operator new, but you should not
-  // assume anything.
-
-  auto result = new internal::HeapValue<T>(kj::fwd<Params>(params)...);
-  return Own<T>(&result->value, result);
+  // assume this. (Since we know the object size at delete time, we could actually implement an
+  // allocator that is more efficient than operator new.)
+
+  return Own<T>(new T(kj::fwd<Params>(params)...), internal::HeapDisposer<T>::instance);
}

+// =======================================================================================
+// Inline implementation details
+
+template <typename T>
+struct Disposer::Dispose_<T, true> {
+  static void dispose(T* object, const Disposer& disposer) {
+    // Note that dynamic_cast<void*> does not require RTTI to be enabled, because the offset to
+    // the top of the object is in the vtable -- as it obviously needs to be to correctly implement
+    // operator delete.
+    disposer.disposeImpl(dynamic_cast<void*>(object));
+  }
+};
+
+template <typename T>
+struct Disposer::Dispose_<T, false> {
+  static void dispose(T* object, const Disposer& disposer) {
+    disposer.disposeImpl(static_cast<void*>(object));
+  }
+};
+
+template <typename T>
+void Disposer::dispose(T* object) const {
+  Dispose_<T>::dispose(object, *this);
+}
+
} // namespace kj
...
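The reworked Disposer interface above reduces a custom ownership strategy to a single disposeImpl() override, with Own<T> now holding a const Disposer& alongside the pointer. A hypothetical sketch of a user-defined disposer built on exactly that interface (DestructOnlyDisposer, Widget, and widget are made-up names, not from this commit):

#include <kj/memory.h>

// Sketch only: a disposer that runs the destructor but does not free memory,
// e.g. for objects placed in an arena whose storage is released elsewhere.
// It mirrors the HeapDisposer pattern shown in the diff above.
template <typename T>
class DestructOnlyDisposer final: public kj::Disposer {
public:
  virtual void disposeImpl(void* pointer) const override {
    reinterpret_cast<T*>(pointer)->~T();  // destroy only; memory owned elsewhere
  }

  static const DestructOnlyDisposer instance;
};

template <typename T>
const DestructOnlyDisposer<T> DestructOnlyDisposer<T>::instance = DestructOnlyDisposer<T>();

// Usage (Widget and widget are hypothetical):
//   kj::Own<Widget> own(widget, DestructOnlyDisposer<Widget>::instance);
//   kj::Own<Widget> heaped = kj::heap<Widget>();  // same pattern via HeapDisposer<Widget>

kj::heap<T>() in the diff is this same pattern with HeapDisposer<T>, whose disposeImpl() calls delete instead of just the destructor.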
@@ -25,11 +25,11 @@
namespace kj {

-String::String(const char* value): content(newArray<char>(strlen(value) + 1)) {
+String::String(const char* value): content(heapArray<char>(strlen(value) + 1)) {
  strcpy(content.begin(), value);
}

-String::String(const char* value, size_t length): content(newArray<char>(length + 1)) {
+String::String(const char* value, size_t length): content(heapArray<char>(length + 1)) {
  memcpy(content.begin(), value, length);
  content[length] = '\0';
}
...
@@ -98,7 +98,7 @@ template <typename T, typename Container>
Array<T> iterableToArray(Container&& a) {
  // Converts an arbitrary iterable container into an array of the given element type.

-  Array<T> result = newArray<T>(a.size());
+  Array<T> result = heapArray<T>(a.size());
  auto i = a.iterator();
  auto end = a.end();
  T* __restrict__ ptr = result.begin();
@@ -146,7 +146,7 @@ Array<Element> concat(Params&&... params) {
  // Eclipse reports a bogus error on `size()`.
  Array<Element> result;
#else
-  Array<Element> result = newArray<Element>(sum({params.size()...}));
+  Array<Element> result = heapArray<Element>(sum({params.size()...}));
#endif
  fill(result.begin(), std::forward<Params>(params)...);
  return result;
@@ -230,7 +230,7 @@ Array<char> strArray(T&& arr, const char* delim) {
    size += pieces[i].size();
  }

-  Array<char> result = newArray<char>(size);
+  Array<char> result = heapArray<char>(size);
  char* pos = result.begin();
  for (size_t i = 0; i < arr.size(); i++) {
    if (i > 0) {
@@ -255,7 +255,7 @@ inline Array<char> Stringifier::operator*(const Array<T>& arr) const {
template <typename T, typename Func>
auto mapArray(T&& arr, Func&& func) -> Array<decltype(func(arr[0]))> {
  // TODO(cleanup): Use ArrayBuilder.
-  Array<decltype(func(arr[0]))> result = newArray<decltype(func(arr[0]))>(arr.size());
+  Array<decltype(func(arr[0]))> result = heapArray<decltype(func(arr[0]))>(arr.size());
  size_t pos = 0;
  for (auto& element: arr) {
    result[pos++] = func(element);
...