// arena.c++
// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Kenton Varda's avatar
Kenton Varda committed
24
#define CAPNP_PRIVATE
25 26
#include "arena.h"
#include "message.h"
27
#include "capability.h"
28
#include "capability-context.h"
Kenton Varda's avatar
Kenton Varda committed
29
#include <kj/debug.h>
30
#include <kj/refcount.h>
31 32
#include <vector>
#include <string.h>
33
#include <stdio.h>
34

35
namespace capnp {
36
namespace _ {  // private
37

38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
namespace {

class BrokenPipeline final: public PipelineHook, public kj::Refcounted {
public:
  BrokenPipeline(const kj::Exception& exception): exception(exception) {}

  kj::Own<const PipelineHook> addRef() const override {
    return kj::addRef(*this);
  }

  kj::Own<const ClientHook> getPipelinedCap(kj::ArrayPtr<const PipelineOp> ops) const override;

private:
  kj::Exception exception;
};

class BrokenRequest final: public RequestHook {
public:
  BrokenRequest(const kj::Exception& exception, uint firstSegmentWordSize)
57 58
      : exception(exception),
        message(firstSegmentWordSize == 0 ? SUGGESTED_FIRST_SEGMENT_WORDS : firstSegmentWordSize) {}
59

Kenton Varda's avatar
Kenton Varda committed
60 61 62
  RemotePromise<ObjectPointer> send() override {
    return RemotePromise<ObjectPointer>(kj::cp(exception),
        ObjectPointer::Pipeline(kj::refcounted<BrokenPipeline>(exception)));
63 64 65
  }

  kj::Exception exception;
66
  LocalMessage message;
67 68 69 70 71 72 73 74 75
};

class BrokenClient final: public ClientHook, public kj::Refcounted {
public:
  BrokenClient(const kj::Exception& exception): exception(exception) {}
  BrokenClient(const char* description)
      : exception(kj::Exception::Nature::PRECONDITION, kj::Exception::Durability::PERMANENT,
                  "", 0, kj::str(description)) {}

Kenton Varda's avatar
Kenton Varda committed
76
  Request<ObjectPointer, ObjectPointer> newCall(
77 78
      uint64_t interfaceId, uint16_t methodId, uint firstSegmentWordSize) const override {
    auto hook = kj::heap<BrokenRequest>(exception, firstSegmentWordSize);
Kenton Varda's avatar
Kenton Varda committed
79 80
    auto root = hook->message.getRoot();
    return Request<ObjectPointer, ObjectPointer>(root, kj::mv(hook));
81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
  }

  VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId,
                              kj::Own<CallContextHook>&& context) const override {
    return VoidPromiseAndPipeline { kj::cp(exception), kj::heap<BrokenPipeline>(exception) };
  }

  kj::Maybe<kj::Promise<kj::Own<const ClientHook>>> whenMoreResolved() const override {
    return kj::Promise<kj::Own<const ClientHook>>(kj::cp(exception));
  }

  kj::Own<const ClientHook> addRef() const override {
    return kj::addRef(*this);
  }

Kenton Varda's avatar
Kenton Varda committed
96
  const void* getBrand() const override {
97 98 99 100 101 102 103 104 105 106 107 108 109 110
    return nullptr;
  }

private:
  kj::Exception exception;
};

kj::Own<const ClientHook> BrokenPipeline::getPipelinedCap(
    kj::ArrayPtr<const PipelineOp> ops) const {
  return kj::heap<BrokenClient>(exception);
}

}  // namespace

111 112
kj::Own<const ClientHook> newBrokenCap(const char* reason) {
  // Construct a capability for which every operation fails with `reason`.
  return kj::refcounted<BrokenClient>(reason);
}

115 116 117
// Out-of-line anchors for the abstract arena interfaces.  Declared
// noexcept(false) because KJ-style destructors are permitted to throw.
Arena::~Arena() noexcept(false) {}
BuilderArena::~BuilderArena() noexcept(false) {}

118 119 120
void ReadLimiter::unread(WordCount64 amount) {
  // Return `amount` words to the remaining read budget, saturating instead of
  // wrapping.  ReadLimiter has no thread-safety, so `limit` may not have been
  // updated correctly for one or more reads; unread() could therefore
  // overflow even when only un-reading words that really were read.  Only
  // commit the new value if the addition did not wrap around.
  uint64_t previous = limit;
  uint64_t restored = previous + amount / WORDS;
  if (restored > previous) {
    limit = restored;
  }
}

129 130
// =======================================================================================

131
BasicReaderArena::BasicReaderArena(MessageReader* message)
    // Seed the shared ReadLimiter from the reader's traversal limit and bind
    // segment 0 eagerly; further segments are fetched lazily in tryGetSegment().
    : message(message),
      readLimiter(message->getOptions().traversalLimitInWords * WORDS),
      segment0(this, SegmentId(0), message->getSegment(0), &readLimiter) {}
135

136
// Members release themselves (RAII); nothing explicit to do.
BasicReaderArena::~BasicReaderArena() noexcept(false) {}
137

138
SegmentReader* BasicReaderArena::tryGetSegment(SegmentId id) {
  // Look up (or lazily create) the SegmentReader for `id`, returning null if
  // the underlying message has no such segment.

  // Segment 0 lives inline in the arena and never goes through the map.
  if (id == SegmentId(0)) {
    return segment0.getArray() == nullptr ? nullptr : &segment0;
  }

  auto lock = moreSegments.lockExclusive();

  SegmentMap* segments = nullptr;
  KJ_IF_MAYBE(s, *lock) {
    // Fast path: we've seen this segment before.
    auto iter = s->get()->find(id.value);
    if (iter != s->get()->end()) {
      return iter->second;
    }
    segments = *s;
  }

  kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
  if (newSegment == nullptr) {
    return nullptr;
  }

  if (*lock == nullptr) {
    // The segment exists but this is the first non-zero segment requested, so
    // the map hasn't been allocated yet.
    auto s = kj::heap<SegmentMap>();
    segments = s;
    *lock = kj::mv(s);
  }

  auto segment = kj::heap<SegmentReader>(this, id, newSegment, &readLimiter);
  SegmentReader* result = segment;
  segments->insert(std::make_pair(id.value, mv(segment)));
  return result;
}

176
void BasicReaderArena::reportReadLimitReached() {
177
  KJ_FAIL_REQUIRE("Exceeded message traversal limit.  See capnp::ReaderOptions.") {
178 179
    return;
  }
180 181
}

182
kj::Own<const ClientHook> BasicReaderArena::extractCap(const _::StructReader& capDescriptor) {
  // A plain reader arena has no capability context.  Report the error; if the
  // caller recovers, substitute a capability that fails when actually called.
  KJ_FAIL_REQUIRE("Message contained a capability but is not imbued with a capability context.") {
    return newBrokenCap(
        "Calling capability extracted from message that was not imbued with a capability "
        "context.");
  }
}

190 191
// =======================================================================================

192 193 194
// An ImbuedReaderArena wraps a base arena, attaching a capability extractor.
// segment0 starts out null and is lazily bound to the base arena's segment 0
// by the first call to imbue().
ImbuedReaderArena::ImbuedReaderArena(Arena* base, CapExtractorBase* capExtractor)
    : base(base), capExtractor(capExtractor),
      segment0(nullptr, SegmentId(0), nullptr, nullptr) {}
195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212
// Members release themselves (RAII); the base arena is not owned.
ImbuedReaderArena::~ImbuedReaderArena() noexcept(false) {}

SegmentReader* ImbuedReaderArena::imbue(SegmentReader* baseSegment) {
  // Wrap a segment of the base arena in a SegmentReader owned by this imbued
  // arena, memoizing the wrapper so each base segment is wrapped exactly once.
  if (baseSegment == nullptr) return nullptr;

  if (baseSegment->getSegmentId() == SegmentId(0)) {
    // Lazily re-construct the inline segment0 wrapper on first use.
    if (segment0.getArena() == nullptr) {
      kj::dtor(segment0);
      kj::ctor(segment0, this, *baseSegment);
    }
    KJ_DASSERT(segment0.getArray().begin() == baseSegment->getArray().begin());
    return &segment0;
  }

  auto lock = moreSegments.lockExclusive();

  SegmentMap* segments = nullptr;
  KJ_IF_MAYBE(s, *lock) {
    auto iter = s->get()->find(baseSegment);
    if (iter != s->get()->end()) {
      // Already wrapped; sanity-check it points at the same memory.
      KJ_DASSERT(iter->second->getArray().begin() == baseSegment->getArray().begin());
      return iter->second;
    }
    segments = *s;
  } else {
    // First non-zero segment: allocate the memo map.
    auto newMap = kj::heap<SegmentMap>();
    segments = newMap;
    *lock = kj::mv(newMap);
  }

  auto newSegment = kj::heap<SegmentReader>(this, *baseSegment);
  SegmentReader* result = newSegment;
  segments->insert(std::make_pair(baseSegment, mv(newSegment)));
  return result;
}

SegmentReader* ImbuedReaderArena::tryGetSegment(SegmentId id) {
  // Look the segment up in the base arena, then wrap it for this arena.
  return imbue(base->tryGetSegment(id));
}

void ImbuedReaderArena::reportReadLimitReached() {
  // Read-limit accounting lives in the base arena; delegate.
  return base->reportReadLimitReached();
}

239
kj::Own<const ClientHook> ImbuedReaderArena::extractCap(const _::StructReader& capDescriptor) {
  // Resolve the descriptor via the extractor this reader was imbued with.
  return capExtractor->extractCapInternal(capDescriptor);
}

243 244 245
// =======================================================================================

BasicBuilderArena::BasicBuilderArena(MessageBuilder* message)
    // segment0 starts out null; it is constructed for real by the first
    // allocate() call.
    : message(message), segment0(nullptr, SegmentId(0), nullptr, nullptr) {}
247
// Members release themselves (RAII); nothing explicit to do.
BasicBuilderArena::~BasicBuilderArena() noexcept(false) {}
248

249
SegmentBuilder* BasicBuilderArena::getSegment(SegmentId id) {
  // This method is allowed to fail if the segment ID is not valid.
  if (id == SegmentId(0)) {
    return &segment0;
  }

  auto lock = moreSegments.lockShared();
  KJ_IF_MAYBE(s, *lock) {
    KJ_REQUIRE(id.value - 1 < s->get()->builders.size(), "invalid segment id", id.value);
    // TODO(cleanup):  Return a const SegmentBuilder and tediously constify all SegmentBuilder
    //   pointers throughout the codebase.
    return const_cast<BasicSegmentBuilder*>(s->get()->builders[id.value - 1].get());
  } else {
    KJ_FAIL_REQUIRE("invalid segment id", id.value);
  }
}

266
BasicBuilderArena::AllocateResult BasicBuilderArena::allocate(WordCount amount) {
  // Allocate `amount` words somewhere in the message, extending it with a new
  // segment if no existing segment has room.  Returns the chosen segment and
  // the start of the allocated words.
  if (segment0.getArena() == nullptr) {
    // We're allocating the first segment.  We don't need to worry about threads at this point
    // because calling MessageBuilder::initRoot() from multiple threads is not intended to be safe.
    kj::ArrayPtr<word> words = message->allocateSegment(amount / WORDS);

    // Re-allocate segment0 in-place.  This is a bit of a hack, but we have not returned any
    // pointers to this segment yet, so it should be fine.
    kj::dtor(segment0);
    kj::ctor(segment0, this, SegmentId(0), words, &this->dummyLimiter);
    return AllocateResult { &segment0, segment0.allocate(amount) };
  }

  // Check if there is space in the first segment.  We can do this without locking.
  word* attempt = segment0.allocate(amount);
  if (attempt != nullptr) {
    return AllocateResult { &segment0, attempt };
  }

  // Need to fall back to additional segments.
  auto lock = moreSegments.lockExclusive();
  MultiSegmentState* segmentState;
  KJ_IF_MAYBE(s, *lock) {
    // TODO(perf):  Check for available space in more than just the last segment.  We don't
    //   want this to be O(n), though, so we'll need to maintain some sort of table.  Complicating
    //   matters, we want SegmentBuilders::allocate() to be fast, so we can't update any such
    //   table when allocation actually happens.  Instead, we could have a priority queue based
    //   on the last-known available size, and then re-check the size when we pop segments off it
    //   and shove them to the back of the queue if they have become too small.
    attempt = s->get()->builders.back()->allocate(amount);
    if (attempt != nullptr) {
      return AllocateResult { s->get()->builders.back().get(), attempt };
    }
    segmentState = *s;
  } else {
    auto newSegmentState = kj::heap<MultiSegmentState>();
    segmentState = newSegmentState;
    *lock = kj::mv(newSegmentState);
  }

  kj::Own<BasicSegmentBuilder> newBuilder = kj::heap<BasicSegmentBuilder>(
      this, SegmentId(segmentState->builders.size() + 1),
      message->allocateSegment(amount / WORDS), &this->dummyLimiter);
  SegmentBuilder* result = newBuilder.get();
  segmentState->builders.push_back(kj::mv(newBuilder));

  // Keep forOutput the right size so that we don't have to re-allocate during
  // getSegmentsForOutput(), which callers might reasonably expect is a thread-safe method.
  segmentState->forOutput.resize(segmentState->builders.size() + 1);

  // Allocating from the new segment is guaranteed to succeed since no other thread could have
  // received a pointer to it yet (since we still hold the lock).
  return AllocateResult { result, result->allocate(amount) };
}

323
kj::ArrayPtr<const kj::ArrayPtr<const word>> BasicBuilderArena::getSegmentsForOutput() {
  // We shouldn't need to lock a mutex here because if this is called multiple times
  // simultaneously, we should only be overwriting the array with the exact same data.  If the
  // number or size of segments is actually changing due to activity in another thread, then the
  // caller has a problem regardless of locking here.

  KJ_IF_MAYBE(segmentState, moreSegments.getWithoutLock()) {
    KJ_DASSERT(segmentState->get()->forOutput.size() == segmentState->get()->builders.size() + 1,
        "segmentState->forOutput wasn't resized correctly when the last builder was added.",
        segmentState->get()->forOutput.size(), segmentState->get()->builders.size());

    // Refresh forOutput with the current extent of every segment, then hand
    // out a view of it.
    kj::ArrayPtr<kj::ArrayPtr<const word>> result(
        &segmentState->get()->forOutput[0], segmentState->get()->forOutput.size());
    uint i = 0;
    result[i++] = segment0.currentlyAllocated();
    for (auto& builder: segmentState->get()->builders) {
      result[i++] = builder->currentlyAllocated();
    }
    return result;
  } else {
    if (segment0.getArena() == nullptr) {
      // We haven't actually allocated any segments yet.
      return nullptr;
    } else {
      // We have only one segment so far.
      segment0ForOutput = segment0.currentlyAllocated();
      return kj::arrayPtr(&segment0ForOutput, 1);
    }
  }
}

354
SegmentReader* BasicBuilderArena::tryGetSegment(SegmentId id) {
  // Unlike getSegment(), an unknown id yields null rather than a failure.
  if (id == SegmentId(0)) {
    // segment0 only exists once the first allocation has happened.
    return segment0.getArena() == nullptr ? nullptr : &segment0;
  }

  auto lock = moreSegments.lockShared();
  KJ_IF_MAYBE(segmentState, *lock) {
    if (id.value <= segmentState->get()->builders.size()) {
      // TODO(cleanup):  Return a const SegmentReader and tediously constify all SegmentBuilder
      //   pointers throughout the codebase.
      return const_cast<SegmentReader*>(kj::implicitCast<const SegmentReader*>(
          segmentState->get()->builders[id.value - 1].get()));
    }
  }
  return nullptr;
}

376 377
void BasicBuilderArena::reportReadLimitReached() {
  // Builder arenas read through dummyLimiter, so hitting a limit indicates a
  // bug in this code rather than in the caller's message.
  KJ_FAIL_ASSERT("Read limit reached for BuilderArena, but it should have been unlimited.") {
    return;
  }
}

382
kj::Own<const ClientHook> BasicBuilderArena::extractCap(const _::StructReader& capDescriptor) {
  // A plain builder arena has no capability context to resolve descriptors.
  KJ_FAIL_REQUIRE("Message contains no capabilities.");
}

386
OrphanBuilder BasicBuilderArena::injectCap(kj::Own<const ClientHook>&& cap) {
  // Without a capability context there is nowhere to record the cap; if the
  // caller recovers from the error, hand back a null orphan.
  KJ_FAIL_REQUIRE("Cannot inject capability into a builder that has not been imbued with a "
                  "capability context.") {
    return OrphanBuilder();
  }
}

void BasicBuilderArena::dropCap(const _::StructReader& capDescriptor) {
  // The only way we could have a cap in the first place is if the error was
  // already reported, so there is nothing to drop here.
}

397 398
// =======================================================================================

399 400
// Wraps a base builder arena, attaching a capability injector.  segment0 is
// lazily bound to the base arena's segment 0 by imbue().
ImbuedBuilderArena::ImbuedBuilderArena(BuilderArena* base, CapInjectorBase* capInjector)
    : base(base), capInjector(capInjector), segment0(nullptr) {}
401 402 403 404 405 406 407 408 409
// Members release themselves (RAII); the base arena is not owned.
ImbuedBuilderArena::~ImbuedBuilderArena() noexcept(false) {}

SegmentBuilder* ImbuedBuilderArena::imbue(SegmentBuilder* baseSegment) {
  // Wrap a segment of the base arena in an ImbuedSegmentBuilder owned by this
  // arena, memoizing wrappers by segment id.
  //
  // Fix: previously, the `return nullptr;` sat after the KJ_IF_MAYBE block in
  // the non-zero-segment branch, so this function returned nullptr for every
  // non-zero segment even after finding/creating `result` — and the segment
  // table was never created when absent.  Now the table is created on demand
  // (mirroring ImbuedReaderArena::imbue) and `result` is always returned.
  if (baseSegment == nullptr) return nullptr;

  SegmentBuilder* result;
  if (baseSegment->getSegmentId() == SegmentId(0)) {
    // Lazily re-construct the inline segment0 wrapper on first use.
    if (segment0.getArena() == nullptr) {
      kj::dtor(segment0);
      kj::ctor(segment0, this, baseSegment);
    }
    result = &segment0;
  } else {
    auto lock = moreSegments.lockExclusive();
    MultiSegmentState* segmentState;
    KJ_IF_MAYBE(s, *lock) {
      segmentState = *s;
    } else {
      // First non-zero segment: allocate the wrapper table.
      auto newSegmentState = kj::heap<MultiSegmentState>();
      segmentState = newSegmentState;
      *lock = kj::mv(newSegmentState);
    }

    auto id = baseSegment->getSegmentId().value;
    if (id >= segmentState->builders.size()) {
      segmentState->builders.resize(id + 1);
    }
    KJ_IF_MAYBE(segment, segmentState->builders[id]) {
      result = *segment;
    } else {
      auto newBuilder = kj::heap<ImbuedSegmentBuilder>(this, baseSegment);
      result = newBuilder;
      segmentState->builders[id] = kj::mv(newBuilder);
    }
  }

  KJ_DASSERT(result->getArray().begin() == baseSegment->getArray().begin());
  return result;
}

SegmentReader* ImbuedBuilderArena::tryGetSegment(SegmentId id) {
  // The base arena is a BuilderArena, so its SegmentReaders are in fact
  // SegmentBuilders; downcast so the result can be wrapped by imbue().
  return imbue(static_cast<SegmentBuilder*>(base->tryGetSegment(id)));
}

void ImbuedBuilderArena::reportReadLimitReached() {
  // Read-limit accounting lives in the base arena; delegate.
  base->reportReadLimitReached();
}

443
kj::Own<const ClientHook> ImbuedBuilderArena::extractCap(const _::StructReader& capDescriptor) {
  // Resolve the descriptor via the injector this builder was imbued with.
  return capInjector->getInjectedCapInternal(capDescriptor);
}

447 448 449 450 451
SegmentBuilder* ImbuedBuilderArena::getSegment(SegmentId id) {
  // Fetch from the base arena and substitute this arena's wrapper segment.
  return imbue(base->getSegment(id));
}

BuilderArena::AllocateResult ImbuedBuilderArena::allocate(WordCount amount) {
  // Allocate in the underlying arena, then swap in this arena's wrapper for
  // the segment that was chosen.
  auto result = base->allocate(amount);
  result.segment = imbue(result.segment);
  return result;
}

457 458 459 460 461 462
OrphanBuilder ImbuedBuilderArena::injectCap(kj::Own<const ClientHook>&& cap) {
  // Delegate to the capability context's injector.
  return capInjector->injectCapInternal(this, kj::mv(cap));
}

void ImbuedBuilderArena::dropCap(const StructReader& capDescriptor) {
  // Let the capability context release whatever the descriptor refers to.
  capInjector->dropCapInternal(capDescriptor);
}

465
}  // namespace _ (private)
466
}  // namespace capnp