arena.c++ 12.3 KB
Newer Older
Kenton Varda's avatar
Kenton Varda committed
1 2
// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
3
//
Kenton Varda's avatar
Kenton Varda committed
4 5 6 7 8 9
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
10
//
Kenton Varda's avatar
Kenton Varda committed
11 12
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
13
//
Kenton Varda's avatar
Kenton Varda committed
14 15 16 17 18 19 20
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
21

Kenton Varda's avatar
Kenton Varda committed
22
#define CAPNP_PRIVATE
23 24
#include "arena.h"
#include "message.h"
Kenton Varda's avatar
Kenton Varda committed
25
#include <kj/debug.h>
26
#include <kj/refcount.h>
27 28
#include <vector>
#include <string.h>
29
#include <stdio.h>
30
#include <stdlib.h>
31

32 33 34 35
#if !CAPNP_LITE
#include "capability.h"
#endif  // !CAPNP_LITE

36
namespace capnp {
37
namespace _ {  // private
38

39 40
// Out-of-line destructor definition anchors the Arena vtable in this translation unit.
// noexcept(false) explicitly permits derived arena destructors to throw (KJ convention).
Arena::~Arena() noexcept(false) {}

41 42 43
void ReadLimiter::unread(WordCount64 amount) {
  // Return `amount` words of read budget to the limiter. Guard against wrap-around:
  // ReadLimiter provides no thread-safety, so `limit` may not have been decremented
  // accurately for every read, and this addition could therefore overflow even when we
  // are only handing back words that really were read. Commit the new value only if
  // the addition did not wrap past UINT64_MAX.
  uint64_t before = limit;
  uint64_t after = before + unbound(amount / WORDS);
  if (after > before) limit = after;
}

52 53 54 55 56 57
void SegmentReader::abortCheckObjectFault() {
  // Fatal-error path for checkObject(): a pointer was found to lie outside the segment's
  // bounds, which would be a segfault if dereferenced in optimized builds. Log loudly and
  // terminate rather than continue with memory unsafety.
  KJ_LOG(FATAL, "checkObject()'s parameter is not in-range; this would segfault in opt mode",
                "this is a serious bug in Cap'n Proto; please notify security@sandstorm.io");
  abort();
}

58 59 60 61 62 63 64
void SegmentBuilder::throwNotWritable() {
  // Out-of-line throw helper: raised when code attempts to obtain a Builder into a
  // read-only (externally referenced) segment. Kept out-of-line so the hot path that
  // checks writability stays small.
  KJ_FAIL_REQUIRE(
      "Tried to form a Builder to an external data segment referenced by the MessageBuilder.  "
      "When you use Orphanage::reference*(), you are not allowed to obtain Builders to the "
      "referenced data, only Readers, because that data is const.");
}

65 66
// =======================================================================================

67
// Convert a raw word count into a bounded SegmentWordCount, failing with a recoverable
// KJ requirement error if the value does not fit in SEGMENT_WORD_COUNT_BITS.
static SegmentWordCount verifySegmentSize(size_t size) {
  return assertMaxBits<SEGMENT_WORD_COUNT_BITS>(bounded(size) * WORDS, [&]() {
    KJ_FAIL_REQUIRE("segment is too large", size);
  });
}

// Primary constructor: wires up the read limiter from the reader's traversal limit option
// and constructs segment0 in place over the caller-provided first segment.
inline ReaderArena::ReaderArena(MessageReader* message, const word* firstSegment,
                                SegmentWordCount firstSegmentSize)
    : message(message),
      readLimiter(bounded(message->getOptions().traversalLimitInWords) * WORDS),
      segment0(this, SegmentId(0), firstSegment, firstSegmentSize, &readLimiter) {}

// Delegating constructor: validates the segment size before handing off to the primary form.
inline ReaderArena::ReaderArena(MessageReader* message, kj::ArrayPtr<const word> firstSegment)
    : ReaderArena(message, firstSegment.begin(), verifySegmentSize(firstSegment.size())) {}

// Public constructor: pulls segment 0 directly from the MessageReader.
ReaderArena::ReaderArena(MessageReader* message)
    : ReaderArena(message, message->getSegment(0)) {}
85

86
// Defaulted-by-hand destructor; noexcept(false) matches the Arena base contract.
ReaderArena::~ReaderArena() noexcept(false) {}
87

88
SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
  // Look up (or lazily create) the SegmentReader for `id`. Segment 0 lives inline in the
  // arena and can be answered without taking the lock; all other segments are fetched from
  // the MessageReader on first use and cached in a map guarded by `moreSegments`'s mutex.
  if (id == SegmentId(0)) {
    if (segment0.getArray() == nullptr) {
      return nullptr;
    } else {
      return &segment0;
    }
  }

  auto lock = moreSegments.lockExclusive();

  SegmentMap* segments = nullptr;
  KJ_IF_MAYBE(s, *lock) {
    // Map already exists -- return the cached reader if this segment was seen before.
    KJ_IF_MAYBE(segment, s->find(id.value)) {
      return *segment;
    }
    segments = s;
  }

  // Cache miss: ask the underlying message for the raw segment data.
  kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
  if (newSegment == nullptr) {
    return nullptr;
  }

  SegmentWordCount newSegmentSize = verifySegmentSize(newSegment.size());

  if (*lock == nullptr) {
    // OK, the segment exists, so allocate the map.
    segments = &lock->emplace();
  }

  // Build the reader, cache it (map owns it), and return the raw pointer. The pointer
  // remains valid for the arena's lifetime since entries are never removed here.
  auto segment = kj::heap<SegmentReader>(
      this, id, newSegment.begin(), newSegmentSize, &readLimiter);
  SegmentReader* result = segment;
  segments->insert(id.value, kj::mv(segment));
  return result;
}

126
void ReaderArena::reportReadLimitReached() {
  // Raise the traversal-limit error. The recovery block (`{ return; }`) lets execution
  // continue when exceptions are disabled -- callers then see reads fail soft rather
  // than the process aborting.
  KJ_FAIL_REQUIRE("Exceeded message traversal limit.  See capnp::ReaderOptions.") {
    return;
  }
}

132 133
// =======================================================================================

134
// Default constructor: no segments allocated yet. segment0 is constructed in a null state
// (its arena pointer is null), which allocate() uses to detect the first allocation.
BuilderArena::BuilderArena(MessageBuilder* message)
    : message(message), segment0(nullptr, SegmentId(0), nullptr, nullptr) {}

// Constructor for resuming a builder over caller-provided segment memory: segments[0]
// becomes segment0, and any further segments become heap-allocated SegmentBuilders.
BuilderArena::BuilderArena(MessageBuilder* message,
                           kj::ArrayPtr<MessageBuilder::SegmentInit> segments)
    : message(message),
      segment0(this, SegmentId(0), segments[0].space.begin(),
               verifySegmentSize(segments[0].space.size()),
               &this->dummyLimiter, verifySegmentSize(segments[0].wordsUsed)) {
  if (segments.size() > 1) {
    kj::Vector<kj::Own<SegmentBuilder>> builders(segments.size() - 1);

    uint i = 1;
    for (auto& segment: segments.slice(1, segments.size())) {
      builders.add(kj::heap<SegmentBuilder>(
          this, SegmentId(i++), segment.space.begin(), verifySegmentSize(segment.space.size()),
          &this->dummyLimiter, verifySegmentSize(segment.wordsUsed)));
    }

    // One forOutput slot per segment *including* segment0 -- getSegmentsForOutput()
    // asserts this size invariant.
    kj::Vector<kj::ArrayPtr<const word>> forOutput;
    forOutput.resize(segments.size());

    // Try the last provided segment first for future allocations.
    // (NOTE(review): relies on kj::Own implicitly converting to a raw pointer.)
    segmentWithSpace = builders.back();

    this->moreSegments = kj::heap<MultiSegmentState>(
        MultiSegmentState { kj::mv(builders), kj::mv(forOutput) });

  } else {
    segmentWithSpace = &segment0;
  }
}

166
// Defaulted-by-hand destructor; noexcept(false) matches the Arena base contract.
BuilderArena::~BuilderArena() noexcept(false) {}
167

168
SegmentBuilder* BuilderArena::getSegment(SegmentId id) {
  // This method is allowed to fail if the segment ID is not valid.
  // Segment 0 is stored inline; segment N (N >= 1) lives at index N-1 of the
  // heap-allocated builders vector.
  if (id == SegmentId(0)) {
    return &segment0;
  } else {
    KJ_IF_MAYBE(s, moreSegments) {
      KJ_REQUIRE(id.value - 1 < s->get()->builders.size(), "invalid segment id", id.value);
      return const_cast<SegmentBuilder*>(s->get()->builders[id.value - 1].get());
    } else {
      KJ_FAIL_REQUIRE("invalid segment id", id.value);
    }
  }
}

182
BuilderArena::AllocateResult BuilderArena::allocate(SegmentWordCount amount) {
  // Allocate `amount` words somewhere in the message, creating a new segment if no
  // existing segment has room. Returns the chosen segment plus a pointer to the words.
  if (segment0.getArena() == nullptr) {
    // We're allocating the first segment.
    // Note: the MessageBuilder's allocator may hand back more space than requested,
    // so the actual size is re-measured from the returned array.
    kj::ArrayPtr<word> ptr = message->allocateSegment(unbound(amount / WORDS));
    auto actualSize = verifySegmentSize(ptr.size());

    // Re-allocate segment0 in-place.  This is a bit of a hack, but we have not returned any
    // pointers to this segment yet, so it should be fine.
    kj::dtor(segment0);
    kj::ctor(segment0, this, SegmentId(0), ptr.begin(), actualSize, &this->dummyLimiter);

    segmentWithSpace = &segment0;
    return AllocateResult { &segment0, segment0.allocate(amount) };
  } else {
    if (segmentWithSpace != nullptr) {
      // Check if there is space in an existing segment.
      // TODO(perf):  Check for available space in more than just the last segment.  We don't
      //   want this to be O(n), though, so we'll need to maintain some sort of table.  Complicating
      //   matters, we want SegmentBuilders::allocate() to be fast, so we can't update any such
      //   table when allocation actually happens.  Instead, we could have a priority queue based
      //   on the last-known available size, and then re-check the size when we pop segments off it
      //   and shove them to the back of the queue if they have become too small.
      word* attempt = segmentWithSpace->allocate(amount);
      if (attempt != nullptr) {
        return AllocateResult { segmentWithSpace, attempt };
      }
    }

    // Need to allocate a new segment.
    SegmentBuilder* result = addSegmentInternal(message->allocateSegment(unbound(amount / WORDS)));

    // Check this new segment first the next time we need to allocate.
    segmentWithSpace = result;

    // Allocating from the new segment is guaranteed to succeed since we made it big enough.
    return AllocateResult { result, result->allocate(amount) };
  }
}

221 222 223 224 225 226 227 228 229 230 231
// Register caller-owned, read-only memory as a new segment of the message. The arena does
// not copy or take ownership of `content`; the shared addSegmentInternal() path handles
// bookkeeping for both external (const) and internally allocated (mutable) segments.
SegmentBuilder* BuilderArena::addExternalSegment(kj::ArrayPtr<const word> content) {
  return addSegmentInternal(content);
}

// Shared implementation for appending a segment (T = word for internally allocated
// segments, T = const word for external ones). Creates the MultiSegmentState lazily,
// assigns the next SegmentId (segment0 occupies id 0, so heap builders start at 1),
// and pre-sizes forOutput so getSegmentsForOutput() never reallocates.
template <typename T>
SegmentBuilder* BuilderArena::addSegmentInternal(kj::ArrayPtr<T> content) {
  // This check should never fail in practice, since you can't get an Orphanage without allocating
  // the root segment.
  KJ_REQUIRE(segment0.getArena() != nullptr,
      "Can't allocate external segments before allocating the root segment.");

  auto contentSize = verifySegmentSize(content.size());

  // Lazily create the multi-segment bookkeeping the first time a second segment appears.
  MultiSegmentState* segmentState;
  KJ_IF_MAYBE(s, moreSegments) {
    segmentState = *s;
  } else {
    auto newSegmentState = kj::heap<MultiSegmentState>();
    segmentState = newSegmentState;
    moreSegments = kj::mv(newSegmentState);
  }

  kj::Own<SegmentBuilder> newBuilder = kj::heap<SegmentBuilder>(
      this, SegmentId(segmentState->builders.size() + 1),
      content.begin(), contentSize, &this->dummyLimiter);
  SegmentBuilder* result = newBuilder.get();
  segmentState->builders.add(kj::mv(newBuilder));

  // Keep forOutput the right size so that we don't have to re-allocate during
  // getSegmentsForOutput(), which callers might reasonably expect is a thread-safe method.
  segmentState->forOutput.resize(segmentState->builders.size() + 1);

  return result;
}

256
kj::ArrayPtr<const kj::ArrayPtr<const word>> BuilderArena::getSegmentsForOutput() {
  // Produce the flat list of segment contents for serialization: slot 0 is segment0,
  // followed by each heap-allocated builder's currently-allocated words.
  //
  // Although this is a read-only method, we shouldn't need to lock a mutex here because if this
  // is called multiple times simultaneously, we should only be overwriting the array with the
  // exact same data.  If the number or size of segments is actually changing due to an activity
  // in another thread, then the caller has a problem regardless of locking here.

  KJ_IF_MAYBE(segmentState, moreSegments) {
    // forOutput was pre-sized when segments were added (see addSegmentInternal()), so this
    // only fills in values -- no allocation happens here.
    KJ_DASSERT(segmentState->get()->forOutput.size() == segmentState->get()->builders.size() + 1,
        "segmentState->forOutput wasn't resized correctly when the last builder was added.",
        segmentState->get()->forOutput.size(), segmentState->get()->builders.size());

    kj::ArrayPtr<kj::ArrayPtr<const word>> result(
        &segmentState->get()->forOutput[0], segmentState->get()->forOutput.size());
    uint i = 0;
    result[i++] = segment0.currentlyAllocated();
    for (auto& builder: segmentState->get()->builders) {
      result[i++] = builder->currentlyAllocated();
    }
    return result;
  } else {
    if (segment0.getArena() == nullptr) {
      // We haven't actually allocated any segments yet.
      return nullptr;
    } else {
      // We have only one segment so far.
      segment0ForOutput = segment0.currentlyAllocated();
      return kj::arrayPtr(&segment0ForOutput, 1);
    }
  }
}

287
SegmentReader* BuilderArena::tryGetSegment(SegmentId id) {
  // Non-throwing lookup used by the reader-side machinery: returns nullptr for unknown
  // segment IDs instead of failing like getSegment() does.
  if (id == SegmentId(0)) {
    if (segment0.getArena() == nullptr) {
      // We haven't allocated any segments yet.
      return nullptr;
    } else {
      return &segment0;
    }
  } else {
    KJ_IF_MAYBE(segmentState, moreSegments) {
      if (id.value <= segmentState->get()->builders.size()) {
        // TODO(cleanup):  Return a const SegmentReader and tediously constify all SegmentBuilder
        //   pointers throughout the codebase.
        return const_cast<SegmentReader*>(kj::implicitCast<const SegmentReader*>(
            segmentState->get()->builders[id.value - 1].get()));
      }
    }
    return nullptr;
  }
}

308
void BuilderArena::reportReadLimitReached() {
  // Builders use `dummyLimiter` and therefore should never hit a read limit; reaching this
  // is an internal assertion failure. The recovery block allows continuing when exceptions
  // are disabled.
  KJ_FAIL_ASSERT("Read limit reached for BuilderArena, but it should have been unlimited.") {
    return;
  }
}

314
#if !CAPNP_LITE
315
kj::Maybe<kj::Own<ClientHook>> BuilderArena::LocalCapTable::extractCap(uint index) {
  // Hand out a fresh reference to the capability stored at `index`. An out-of-range
  // index, or a slot that was cleared by dropCap(), yields null.
  if (index >= capTable.size()) {
    return nullptr;
  }
  return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
}

323
uint BuilderArena::LocalCapTable::injectCap(kj::Own<ClientHook>&& cap) {
  // Take ownership of the capability, appending it to the table, and return the index
  // of the slot it now occupies.
  capTable.add(kj::mv(cap));
  return capTable.size() - 1;
}

329
void BuilderArena::LocalCapTable::dropCap(uint index) {
  // Release the capability in slot `index` by nulling it out. The slot itself is retained
  // (not erased) so other indices stay stable. The KJ_ASSERT recovery block turns an
  // out-of-range index into a no-op when exceptions are disabled.
  KJ_ASSERT(index < capTable.size(), "Invalid capability descriptor in message.") {
    return;
  }
  capTable[index] = nullptr;
}
335
#endif  // !CAPNP_LITE
336

337
}  // namespace _ (private)
338
}  // namespace capnp