Commit 64075f16 authored by Kenton Varda

Fix subtle bug.

parent b4d403f5
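
The change below (the hunks appear to come from kj/arena.c++ and kj/arena.h; the file headers are not shown in this view) splits the lock-free allocation loop out of Arena::allocateBytes() into a new allocateBytesLockless() helper and, crucially, makes allocateBytesFallback() retry that lock-free path after acquiring the lock. Per the comments added in the diff, the subtle bug was that the fallback assumed the current chunk was still out of space once the lock was held, even though another thread could have installed a fresh chunk in the meantime. The following is a generic, self-contained sketch of that pattern for illustration only; it is not the kj code, the names (BumpArena, kChunkSize, etc.) are invented here, and alignment, disposers, and retention of old chunks are deliberately left out.

// Generic illustration of the allocation pattern (not the kj implementation).
#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <mutex>
#include <new>

class BumpArena {
public:
  void* allocate(std::size_t amount) {
    if (void* p = allocateLockless(amount)) return p;  // fast path: no lock
    return allocateFallback(amount);                   // slow path: takes the lock
  }

private:
  struct Chunk {
    std::atomic<char*> pos;  // next free byte
    char* end;               // one past the last usable byte
  };

  static constexpr std::size_t kChunkSize = 64 * 1024;  // payload bytes per chunk

  std::atomic<Chunk*> current{nullptr};
  std::mutex mutex;

  void* allocateLockless(std::size_t amount) {
    Chunk* chunk = current.load(std::memory_order_acquire);
    if (chunk == nullptr) return nullptr;  // no chunk allocated yet
    char* pos = chunk->pos.load(std::memory_order_relaxed);
    for (;;) {
      if (static_cast<std::size_t>(chunk->end - pos) < amount) return nullptr;  // chunk full
      // Try to claim [pos, pos + amount); on failure `pos` is reloaded and we retry.
      if (chunk->pos.compare_exchange_weak(pos, pos + amount, std::memory_order_relaxed)) {
        return pos;
      }
    }
  }

  void* allocateFallback(std::size_t amount) {
    std::lock_guard<std::mutex> lock(mutex);
    // The fix this commit makes: re-check under the lock, because another thread may have
    // installed a new chunk between our failed lock-free attempt and acquiring the lock.
    if (void* p = allocateLockless(amount)) return p;
    // Only now do we know a new chunk is really needed (assumes amount <= kChunkSize;
    // old chunks are intentionally leaked in this sketch).
    char* memory = static_cast<char*>(std::malloc(sizeof(Chunk) + kChunkSize));
    Chunk* chunk = new (memory) Chunk;
    chunk->pos.store(memory + sizeof(Chunk) + amount, std::memory_order_relaxed);
    chunk->end = memory + sizeof(Chunk) + kChunkSize;
    current.store(chunk, std::memory_order_release);
    return memory + sizeof(Chunk);
  }
};

Without the re-check in allocateFallback(), two threads that both fail the lock-free path would each allocate a new chunk, even though the first new chunk has plenty of room for the second request.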
@@ -97,15 +97,28 @@ void* Arena::allocateBytes(size_t amount, uint alignment, bool hasDisposer) cons
     amount += alignTo(sizeof(ObjectHeader), alignment);
   }
 
-  void* result;
+  void* result = allocateBytesLockless(amount, alignment);
+
+  if (result == nullptr) {
+    result = allocateBytesFallback(amount, alignment);
+  }
+
+  if (hasDisposer) {
+    // Reserve space for the ObjectHeader, but don't add it to the object list yet.
+    result = alignTo(reinterpret_cast<byte*>(result) + sizeof(ObjectHeader), alignment);
+  }
+
+  KJ_DASSERT(reinterpret_cast<uintptr_t>(result) % alignment == 0);
+  return result;
+}
+
+void* Arena::allocateBytesLockless(size_t amount, uint alignment) const {
   for (;;) {
     ChunkHeader* chunk = __atomic_load_n(&state.getWithoutLock().currentChunk, __ATOMIC_ACQUIRE);
 
     if (chunk == nullptr) {
       // No chunks allocated yet.
-      result = allocateBytesFallback(amount, alignment);
-      break;
+      return nullptr;
     }
 
     byte* pos = __atomic_load_n(&chunk->pos, __ATOMIC_RELAXED);
@@ -115,31 +128,31 @@ void* Arena::allocateBytes(size_t amount, uint alignment, bool hasDisposer) cons
     // Careful about pointer wrapping (e.g. if the chunk is near the end of the address space).
     if (chunk->end - endPos < 0) {
       // Not enough space.
-      result = allocateBytesFallback(amount, alignment);
-      break;
+      return nullptr;
     }
 
     // There appears to be enough space in this chunk, unless another thread stole it.
     if (KJ_LIKELY(__atomic_compare_exchange_n(
           &chunk->pos, &pos, endPos, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
-      result = alignedPos;
-      break;
+      return alignedPos;
     }
 
     // Contention. Retry.
   }
-
-  if (hasDisposer) {
-    // Reserve space for the ObjectHeader, but don't add it to the object list yet.
-    result = alignTo(reinterpret_cast<byte*>(result) + sizeof(ObjectHeader), alignment);
-  }
-
-  KJ_DASSERT(reinterpret_cast<uintptr_t>(result) % alignment == 0);
-  return result;
 }
 
 void* Arena::allocateBytesFallback(size_t amount, uint alignment) const {
   auto lock = state.lockExclusive();
 
-  // We already know that the current chunk is out of space.
+  // Now that we have the lock, try one more time to allocate from the current chunk. This could
+  // work if another thread allocated a new chunk while we were waiting for the lock.
+  void* locklessResult = allocateBytesLockless(amount, alignment);
+  if (locklessResult != nullptr) {
+    return locklessResult;
+  }
+
+  // OK, we know the current chunk is out of space and we hold the lock so no one else is
+  // allocating a new one. Let's do it!
   alignment = kj::max(alignment, alignof(ChunkHeader));
   amount += alignTo(sizeof(ChunkHeader), alignment);
@@ -114,6 +114,10 @@ private:
   void* allocateBytes(size_t amount, uint alignment, bool hasDisposer) const;
   // Allocate the given number of bytes. `hasDisposer` must be true if `setDisposer()` may be
   // called on this pointer later.
 
+  void* allocateBytesLockless(size_t amount, uint alignment) const;
+  // Try to allocate the given number of bytes without taking a lock. Fails if and only if there
+  // is no space left in the current chunk.
+
   void* allocateBytesFallback(size_t amount, uint alignment) const;
   // Fallback used when the current chunk is out of space.
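
For context, these private helpers sit behind kj::Arena's public allocation interface. Below is a minimal usage sketch, assuming the public API declared in kj/arena.h (Arena::allocate<T>() and Arena::allocateArray<T>()) and assuming, as this revision's implementation suggests, that concurrent allocate() calls are supported; the Point type and the thread/iteration counts are invented for illustration.

#include <kj/arena.h>
#include <thread>
#include <vector>

struct Point {
  Point(int x, int y): x(x), y(y) {}
  int x;
  int y;
};

int main() {
  kj::Arena arena;

  // Objects live until the Arena itself is destroyed; there is no per-object free.
  Point& origin = arena.allocate<Point>(0, 0);
  kj::ArrayPtr<int> numbers = arena.allocateArray<int>(16);

  // Each allocation first tries the lock-free bump in the current chunk
  // (allocateBytesLockless) and only takes the lock when a new chunk is needed
  // (allocateBytesFallback).
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; i++) {
    threads.emplace_back([&arena]() {
      for (int j = 0; j < 1000; j++) {
        arena.allocate<Point>(j, j);
      }
    });
  }
  for (auto& t: threads) t.join();

  (void)origin;
  (void)numbers;
  return 0;
}

The lock-free fast path keeps the common case (space still available in the current chunk) down to a single atomic compare-and-swap; the lock is only taken when a chunk fills up.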