arena.c++
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "arena.h"
#include "debug.h"
#include <stdint.h>

namespace kj {

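// The size hint only records how big to make the first chunk; no memory is allocated until the
// first call to allocateBytes().  kj::max() ensures the chunk is never too small to hold its own
// ChunkHeader.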
Arena::Arena(size_t chunkSizeHint): state(kj::max(sizeof(ChunkHeader), chunkSizeHint)) {}

Arena::Arena(ArrayPtr<byte> scratch)
    : state(kj::max(sizeof(ChunkHeader), scratch.size())) {
  if (scratch.size() > sizeof(ChunkHeader)) {
    ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(scratch.begin());
    chunk->end = scratch.end();
    chunk->pos = reinterpret_cast<byte*>(chunk + 1);
    chunk->next = nullptr;  // Never actually observed.

    // Don't place the chunk in the chunk list because it's not ours to delete.  Just make it the
    // current chunk so that we'll allocate from it until it is empty.
    state.getWithoutLock().currentChunk = chunk;
  }
}

Arena::~Arena() noexcept(false) {
  // Run cleanup explicitly.  It will be executed again implicitly when state's destructor is
  // called.  This ensures that if the first pass throws an exception, remaining objects are still
  // destroyed.  If the second pass throws, the program terminates, but any destructors that could
  // throw should be using UnwindDetector to avoid this.
  state.getWithoutLock().cleanup();
}

void Arena::State::cleanup() {
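  // Run destructors for all allocated objects first.  setDestructor() pushes onto the head of
  // objectList, so objects are destroyed in roughly reverse order of registration.  Only after
  // that do we free the chunks that back them.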
  while (objectList != nullptr) {
    void* ptr = objectList + 1;
    auto destructor = objectList->destructor;
    objectList = objectList->next;
    destructor(ptr);
  }

  while (chunkList != nullptr) {
    void* ptr = chunkList;
    chunkList = chunkList->next;
    operator delete(ptr);
  }
}

namespace {

constexpr bool isPowerOfTwo(size_t value) {
  return (value & (value - 1)) == 0;
}

inline byte* alignTo(byte* p, uint alignment) {
  // Round the pointer up to the next aligned value.

  KJ_DASSERT(isPowerOfTwo(alignment), alignment);
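  // For a power-of-two alignment, (alignment - 1) is a mask of the low bits.  Adding the mask and
  // then clearing those bits rounds up, e.g. a pointer value of 13 with alignment 8 becomes 16.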
  uintptr_t mask = alignment - 1;
  uintptr_t i = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<byte*>((i + mask) & ~mask);
}

inline size_t alignTo(size_t s, uint alignment) {
  // Round the size up to the next aligned value.

  KJ_DASSERT(isPowerOfTwo(alignment), alignment);
  size_t mask = alignment - 1;
  return (s + mask) & ~mask;
}

}  // namespace

void* Arena::allocateBytes(size_t amount, uint alignment, bool hasDisposer) const {
  if (hasDisposer) {
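    // Reserve room in front of the object for an ObjectHeader, padded so that the object itself
    // stays aligned; setDestructor() will fill the header in later.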
    alignment = kj::max(alignment, alignof(ObjectHeader));
    amount += alignTo(sizeof(ObjectHeader), alignment);
  }

  void* result = allocateBytesLockless(amount, alignment);

  if (result == nullptr) {
    result = allocateBytesFallback(amount, alignment);
  }

  if (hasDisposer) {
    // Reserve space for the ObjectHeader, but don't add it to the object list yet.
    result = alignTo(reinterpret_cast<byte*>(result) + sizeof(ObjectHeader), alignment);
  }

  KJ_DASSERT(reinterpret_cast<uintptr_t>(result) % alignment == 0);
  return result;
}

void* Arena::allocateBytesLockless(size_t amount, uint alignment) const {
  for (;;) {
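    // Acquire-load the current chunk pointer so that the chunk header fields written before the
    // matching release store in allocateBytesFallback() are visible to this thread.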
    ChunkHeader* chunk = __atomic_load_n(&state.getWithoutLock().currentChunk, __ATOMIC_ACQUIRE);

    if (chunk == nullptr) {
      // No chunks allocated yet.
      return nullptr;
    }

    byte* pos = __atomic_load_n(&chunk->pos, __ATOMIC_RELAXED);
    byte* alignedPos = alignTo(pos, alignment);
    byte* endPos = alignedPos + amount;

    // Careful about pointer wrapping (e.g. if the chunk is near the end of the address space).
    if (chunk->end - endPos < 0) {
      // Not enough space.
      return nullptr;
    }

    // There appears to be enough space in this chunk, unless another thread stole it.
    if (KJ_LIKELY(__atomic_compare_exchange_n(
          &chunk->pos, &pos, endPos, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
      return alignedPos;
    }

    // Contention.  Retry.
  }
}

void* Arena::allocateBytesFallback(size_t amount, uint alignment) const {
  auto lock = state.lockExclusive();

  // Now that we have the lock, try one more time to allocate from the current chunk.  This could
  // work if another thread allocated a new chunk while we were waiting for the lock.
  void* locklessResult = allocateBytesLockless(amount, alignment);
  if (locklessResult != nullptr) {
    return locklessResult;
  }

  // OK, we know the current chunk is out of space and we hold the lock so no one else is
  // allocating a new one.  Let's do it!

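  // The new chunk must hold its own header in front of the caller's allocation, padded so that
  // the caller's alignment is preserved.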
  alignment = kj::max(alignment, alignof(ChunkHeader));
  amount += alignTo(sizeof(ChunkHeader), alignment);

  while (lock->nextChunkSize < amount) {
    lock->nextChunkSize *= 2;
  }

  byte* bytes = reinterpret_cast<byte*>(operator new(lock->nextChunkSize));

  ChunkHeader* newChunk = reinterpret_cast<ChunkHeader*>(bytes);
  newChunk->next = lock->chunkList;
  newChunk->pos = bytes + amount;
  newChunk->end = bytes + lock->nextChunkSize;
  __atomic_store_n(&lock->currentChunk, newChunk, __ATOMIC_RELEASE);

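  // Double the size used for the next chunk so that a long-lived arena allocates only
  // logarithmically many chunks relative to the total bytes requested.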
  lock->nextChunkSize *= 2;

  byte* result = alignTo(bytes + sizeof(ChunkHeader), alignment);
  lock->chunkList = newChunk;

  return result;
}

StringPtr Arena::copyString(StringPtr content) const {
  char* data = reinterpret_cast<char*>(allocateBytes(content.size() + 1, 1, false));
  memcpy(data, content.cStr(), content.size() + 1);
  return StringPtr(data, content.size());
}

void Arena::setDestructor(void* ptr, void (*destructor)(void*)) const {
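  // allocateBytes() reserved space for an ObjectHeader immediately in front of the object
  // whenever hasDisposer was true, so the header sits just before ptr.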
  ObjectHeader* header = reinterpret_cast<ObjectHeader*>(ptr) - 1;
  KJ_DASSERT(reinterpret_cast<uintptr_t>(header) % alignof(ObjectHeader) == 0);
  header->destructor = destructor;
  header->next = state.getWithoutLock().objectList;

  // We can use relaxed atomics here because the object list is not actually traversed until the
  // destructor, which needs to be synchronized in its own way.
  while (!__atomic_compare_exchange_n(
      &state.getWithoutLock().objectList, &header->next, header, true,
      __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
    // Retry.
  }
}

}  // namespace kj