// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#define CAPNP_PRIVATE
#include "arena.h"
#include "message.h"
#include <kj/debug.h>
#include <kj/refcount.h>
#include <vector>
#include <string.h>
#include <stdio.h>

#if !CAPNP_LITE
#include "capability.h"
#endif  // !CAPNP_LITE

namespace capnp {
namespace _ {  // private

Arena::~Arena() noexcept(false) {}

void ReadLimiter::unread(WordCount64 amount) {
  // Be careful not to overflow here.  Since ReadLimiter has no thread-safety, it's possible that
  // the limit value was not updated correctly for one or more reads, and therefore unread() could
  // overflow it even if it is only unreading bytes that were actually read.
  uint64_t oldValue = limit;
  uint64_t newValue = oldValue + amount / WORDS;
  if (newValue > oldValue) {
    limit = newValue;
  }
}
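
// Illustrative note on the guard above: if `limit` is already near the top of the 64-bit range
// (as with an effectively unlimited limiter, or after racy updates) -- say 2^64 - 10 -- and
// unread() then adds back 20 words, the sum wraps around to 10.  The `newValue > oldValue`
// check catches the wrap and leaves the limit untouched instead of clobbering it.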

void SegmentBuilder::throwNotWritable() {
  KJ_FAIL_REQUIRE(
      "Tried to form a Builder to an external data segment referenced by the MessageBuilder.  "
      "When you use Orphanage::reference*(), you are not allowed to obtain Builders to the "
      "referenced data, only Readers, because that data is const.");
}

// =======================================================================================

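// ReaderArena wraps a MessageReader: it enforces the traversal limit via its ReadLimiter and
// hands out SegmentReaders on demand.  A rough sketch of how it gets exercised (illustration
// only; `words` stands for some kj::ArrayPtr<const capnp::word> the caller already has, and
// callers normally go through a MessageReader subclass rather than touching the arena):
//
//     capnp::ReaderOptions options;
//     options.traversalLimitInWords = 8 * 1024 * 1024;
//     capnp::FlatArrayMessageReader reader(words, options);
//     // Walking the message's pointers faults segments in through tryGetSegment() and
//     // charges each read against the limiter's budget.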
ReaderArena::ReaderArena(MessageReader* message)
    : message(message),
      readLimiter(message->getOptions().traversalLimitInWords * WORDS),
      segment0(this, SegmentId(0), message->getSegment(0), &readLimiter) {}

ReaderArena::~ReaderArena() noexcept(false) {}

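// Returns the segment with the given ID, or null if no such segment exists.  Segment 0 is
// served from the inline member; other segments are cached in a lazily-created map guarded by
// a mutex, since multiple reader threads may fault in segments concurrently.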
SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
  if (id == SegmentId(0)) {
    if (segment0.getArray() == nullptr) {
      return nullptr;
    } else {
      return &segment0;
    }
  }

  auto lock = moreSegments.lockExclusive();

  SegmentMap* segments = nullptr;
  KJ_IF_MAYBE(s, *lock) {
    auto iter = s->get()->find(id.value);
    if (iter != s->get()->end()) {
      return iter->second;
    }
    segments = *s;
  }

  kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
  if (newSegment == nullptr) {
    return nullptr;
  }

  if (*lock == nullptr) {
    // OK, the segment exists, so allocate the map.
    auto s = kj::heap<SegmentMap>();
    segments = s;
    *lock = kj::mv(s);
  }

  auto segment = kj::heap<SegmentReader>(this, id, newSegment, &readLimiter);
  SegmentReader* result = segment;
  segments->insert(std::make_pair(id.value, kj::mv(segment)));
  return result;
}

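// Note the `{ return; }` recovery block on the failure macro below: in KJ, when an error is
// handled without unwinding (e.g. exceptions are disabled), execution resumes inside that
// block, so traversal simply stops rather than crashing the process.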
void ReaderArena::reportReadLimitReached() {
  KJ_FAIL_REQUIRE("Exceeded message traversal limit.  See capnp::ReaderOptions.") {
    return;
  }
}

#if !CAPNP_LITE
kj::Maybe<kj::Own<ClientHook>> ReaderArena::extractCap(uint index) {
  if (index < capTable.size()) {
    return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
  } else {
    return nullptr;
  }
}
#endif  // !CAPNP_LITE

// =======================================================================================

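// BuilderArena starts with segment0 detached (null arena pointer) and only constructs it for
// real on the first allocation.  Roughly (illustration only; MyStruct is a placeholder for any
// generated struct type):
//
//     capnp::MallocMessageBuilder message;    // arena exists, but no segments allocated yet
//     message.initRoot<MyStruct>();           // first allocate() re-constructs segment0 in place
//     auto segments = message.getSegmentsForOutput();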
BuilderArena::BuilderArena(MessageBuilder* message)
    : message(message), segment0(nullptr, SegmentId(0), nullptr, nullptr) {}

BuilderArena::BuilderArena(MessageBuilder* message,
                           kj::ArrayPtr<MessageBuilder::SegmentInit> segments)
    : message(message),
      segment0(this, SegmentId(0), segments[0].space, &this->dummyLimiter, segments[0].wordsUsed) {
  if (segments.size() > 1) {
    kj::Vector<kj::Own<SegmentBuilder>> builders(segments.size() - 1);

    uint i = 1;
    for (auto& segment: segments.slice(1, segments.size())) {
      builders.add(kj::heap<SegmentBuilder>(
          this, SegmentId(i++), segment.space, &this->dummyLimiter, segment.wordsUsed));
    }

    kj::Vector<kj::ArrayPtr<const word>> forOutput;
    forOutput.resize(segments.size());

    segmentWithSpace = builders.back();

    this->moreSegments = kj::heap<MultiSegmentState>(
        MultiSegmentState { kj::mv(builders), kj::mv(forOutput) });

  } else {
    segmentWithSpace = &segment0;
  }
}

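// Note: the constructor above adopts segments the caller already owns (presumably the path
// taken by MessageBuilder's SegmentInit-based constructors, e.g. when resuming a message in
// preallocated space).  Only the last adopted segment becomes segmentWithSpace; earlier
// segments are never revisited, consistent with the TODO(perf) note in allocate() below.
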
BuilderArena::~BuilderArena() noexcept(false) {}

SegmentBuilder* BuilderArena::getSegment(SegmentId id) {
  // This method is allowed to fail if the segment ID is not valid.
  if (id == SegmentId(0)) {
    return &segment0;
  } else {
    KJ_IF_MAYBE(s, moreSegments) {
      KJ_REQUIRE(id.value - 1 < s->get()->builders.size(), "invalid segment id", id.value);
      return const_cast<SegmentBuilder*>(s->get()->builders[id.value - 1].get());
    } else {
      KJ_FAIL_REQUIRE("invalid segment id", id.value);
    }
  }
}

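// Allocates `amount` words.  The first allocation constructs segment0 in place; after that we
// first try segmentWithSpace (the most recently created or adopted segment) and otherwise ask
// the MessageBuilder for a fresh segment sized to fit the whole request.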
BuilderArena::AllocateResult BuilderArena::allocate(WordCount amount) {
  if (segment0.getArena() == nullptr) {
    // We're allocating the first segment.
    kj::ArrayPtr<word> ptr = message->allocateSegment(amount / WORDS);

    // Re-allocate segment0 in-place.  This is a bit of a hack, but we have not returned any
    // pointers to this segment yet, so it should be fine.
    kj::dtor(segment0);
    kj::ctor(segment0, this, SegmentId(0), ptr, &this->dummyLimiter);

    segmentWithSpace = &segment0;
    return AllocateResult { &segment0, segment0.allocate(amount) };
  } else {
    if (segmentWithSpace != nullptr) {
      // Check if there is space in an existing segment.
      // TODO(perf):  Check for available space in more than just the last segment.  We don't
      //   want this to be O(n), though, so we'll need to maintain some sort of table.  Complicating
      //   matters, we want SegmentBuilder::allocate() to be fast, so we can't update any such
      //   table when allocation actually happens.  Instead, we could have a priority queue based
      //   on the last-known available size, and then re-check the size when we pop segments off it
      //   and shove them to the back of the queue if they have become too small.
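      //   A sketch of that idea (not implemented): keep a max-heap of segments keyed by
      //   last-known free words; pop the top entry, re-check its actual free space, and either
      //   use it or push it back with the corrected key, leaving allocate() itself cheap.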
      word* attempt = segmentWithSpace->allocate(amount);
      if (attempt != nullptr) {
        return AllocateResult { segmentWithSpace, attempt };
      }
    }

    // Need to allocate a new segment.
    SegmentBuilder* result = addSegmentInternal(message->allocateSegment(amount / WORDS));

    // Check this new segment first the next time we need to allocate.
    segmentWithSpace = result;

    // Allocating from the new segment is guaranteed to succeed since we made it big enough.
    return AllocateResult { result, result->allocate(amount) };
  }
}

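// addExternalSegment() grafts caller-owned, read-only data into the message (the
// external-segment path that throwNotWritable() above guards).  addSegmentInternal() is the
// shared bookkeeping: new segments get IDs 1, 2, ... in creation order, so builders[i] always
// holds segment ID i + 1, with segment 0 stored inline.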
SegmentBuilder* BuilderArena::addExternalSegment(kj::ArrayPtr<const word> content) {
  return addSegmentInternal(content);
}

template <typename T>
SegmentBuilder* BuilderArena::addSegmentInternal(kj::ArrayPtr<T> content) {
  // This check should never fail in practice, since you can't get an Orphanage without allocating
  // the root segment.
  KJ_REQUIRE(segment0.getArena() != nullptr,
      "Can't allocate external segments before allocating the root segment.");

  MultiSegmentState* segmentState;
  KJ_IF_MAYBE(s, moreSegments) {
    segmentState = *s;
  } else {
    auto newSegmentState = kj::heap<MultiSegmentState>();
    segmentState = newSegmentState;
    moreSegments = kj::mv(newSegmentState);
  }

  kj::Own<SegmentBuilder> newBuilder = kj::heap<SegmentBuilder>(
      this, SegmentId(segmentState->builders.size() + 1), content, &this->dummyLimiter);
  SegmentBuilder* result = newBuilder.get();
  segmentState->builders.add(kj::mv(newBuilder));

  // Keep forOutput the right size so that we don't have to re-allocate during
  // getSegmentsForOutput(), which callers might reasonably expect is a thread-safe method.
  segmentState->forOutput.resize(segmentState->builders.size() + 1);

  return result;
}

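// getSegmentsForOutput() feeds the serialization layer.  A typical call chain (sketch; the
// real entry points live in serialize.h):
//
//     capnp::MallocMessageBuilder builder;
//     // ... build the message content ...
//     kj::Array<capnp::word> flat = capnp::messageToFlatArray(builder);
//
// messageToFlatArray() fetches the builder's segment array, which bottoms out here, and
// prefixes it with the standard segment table.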
kj::ArrayPtr<const kj::ArrayPtr<const word>> BuilderArena::getSegmentsForOutput() {
  // Although this is a read-only method, we shouldn't need to lock a mutex here because if this
  // is called multiple times simultaneously, we should only be overwriting the array with the
  // exact same data.  If the number or size of segments is actually changing due to activity
  // in another thread, then the caller has a problem regardless of locking here.

  KJ_IF_MAYBE(segmentState, moreSegments) {
    KJ_DASSERT(segmentState->get()->forOutput.size() == segmentState->get()->builders.size() + 1,
        "segmentState->forOutput wasn't resized correctly when the last builder was added.",
        segmentState->get()->forOutput.size(), segmentState->get()->builders.size());

    kj::ArrayPtr<kj::ArrayPtr<const word>> result(
        &segmentState->get()->forOutput[0], segmentState->get()->forOutput.size());
    uint i = 0;
    result[i++] = segment0.currentlyAllocated();
    for (auto& builder: segmentState->get()->builders) {
      result[i++] = builder->currentlyAllocated();
    }
    return result;
  } else {
    if (segment0.getArena() == nullptr) {
      // We haven't actually allocated any segments yet.
      return nullptr;
    } else {
      // We have only one segment so far.
      segment0ForOutput = segment0.currentlyAllocated();
      return kj::arrayPtr(&segment0ForOutput, 1);
    }
  }
}

SegmentReader* BuilderArena::tryGetSegment(SegmentId id) {
  if (id == SegmentId(0)) {
    if (segment0.getArena() == nullptr) {
      // We haven't allocated any segments yet.
      return nullptr;
    } else {
      return &segment0;
    }
  } else {
    KJ_IF_MAYBE(segmentState, moreSegments) {
      if (id.value <= segmentState->get()->builders.size()) {
        // TODO(cleanup):  Return a const SegmentReader and tediously constify all SegmentBuilder
        //   pointers throughout the codebase.
        return const_cast<SegmentReader*>(kj::implicitCast<const SegmentReader*>(
            segmentState->get()->builders[id.value - 1].get()));
      }
    }
    return nullptr;
  }
}

void BuilderArena::reportReadLimitReached() {
  KJ_FAIL_ASSERT("Read limit reached for BuilderArena, but it should have been unlimited.") {
    return;
  }
}

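// The capability table: injectCap() appends a ClientHook and returns its index (the index,
// not a pointer, is what ends up embedded in the message's capability pointers), extractCap()
// returns a new reference to the hook at a given index, and dropCap() nulls out an entry so
// later extracts find nothing.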
#if !CAPNP_LITE
kj::Maybe<kj::Own<ClientHook>> BuilderArena::extractCap(uint index) {
  if (index < capTable.size()) {
    return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
  } else {
    return nullptr;
  }
}

uint BuilderArena::injectCap(kj::Own<ClientHook>&& cap) {
  // TODO(perf):  Detect if the cap is already on the table and reuse the index?  Perhaps this
  //   doesn't happen enough to be worth the effort.
  uint result = capTable.size();
  capTable.add(kj::mv(cap));
  return result;
}

void BuilderArena::dropCap(uint index) {
  KJ_ASSERT(index < capTable.size(), "Invalid capability descriptor in message.") {
    return;
  }
  capTable[index] = nullptr;
}
#endif  // !CAPNP_LITE

}  // namespace _ (private)
}  // namespace capnp