// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#define CAPNP_PRIVATE
#include "arena.h"
#include "message.h"
#include "capability.h"
#include <kj/debug.h>
#include <kj/refcount.h>
#include <vector>
#include <string.h>
#include <stdio.h>

namespace capnp {
namespace _ {  // private

37 38
Arena::~Arena() noexcept(false) {}

39 40 41
void ReadLimiter::unread(WordCount64 amount) {
  // Be careful not to overflow here.  Since ReadLimiter has no thread-safety, it's possible that
  // the limit value was not updated correctly for one or more reads, and therefore unread() could
David Renshaw's avatar
David Renshaw committed
42
  // overflow it even if it is only unreading bytes that were actually read.
43 44 45
  uint64_t oldValue = limit;
  uint64_t newValue = oldValue + amount / WORDS;
  if (newValue > oldValue) {
46 47 48 49
    limit = newValue;
  }
}

50 51 52 53 54 55 56
void SegmentBuilder::throwNotWritable() {
  KJ_FAIL_REQUIRE(
      "Tried to form a Builder to an external data segment referenced by the MessageBuilder.  "
      "When you use Orphanage::reference*(), you are not allowed to obtain Builders to the "
      "referenced data, only Readers, because that data is const.");
}

// =======================================================================================

59
ReaderArena::ReaderArena(MessageReader* message)
60
    : message(message),
61 62
      readLimiter(message->getOptions().traversalLimitInWords * WORDS),
      segment0(this, SegmentId(0), message->getSegment(0), &readLimiter) {}
63

64
ReaderArena::~ReaderArena() noexcept(false) {}
65

66
SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
67 68 69 70 71 72 73 74
  if (id == SegmentId(0)) {
    if (segment0.getArray() == nullptr) {
      return nullptr;
    } else {
      return &segment0;
    }
  }

75
  auto lock = moreSegments.lockExclusive();
76

77
  SegmentMap* segments = nullptr;
78
  KJ_IF_MAYBE(s, *lock) {
79 80
    auto iter = s->get()->find(id.value);
    if (iter != s->get()->end()) {
81
      return iter->second;
82
    }
83
    segments = *s;
84 85
  }

86
  kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
87
  if (newSegment == nullptr) {
88 89
    return nullptr;
  }
90

91
  if (*lock == nullptr) {
92
    // OK, the segment exists, so allocate the map.
93 94
    auto s = kj::heap<SegmentMap>();
    segments = s;
95
    *lock = kj::mv(s);
96 97
  }

98 99 100 101
  auto segment = kj::heap<SegmentReader>(this, id, newSegment, &readLimiter);
  SegmentReader* result = segment;
  segments->insert(std::make_pair(id.value, mv(segment)));
  return result;
102 103
}

104
void ReaderArena::reportReadLimitReached() {
105
  KJ_FAIL_REQUIRE("Exceeded message traversal limit.  See capnp::ReaderOptions.") {
106 107
    return;
  }
108 109
}

110
kj::Maybe<kj::Own<ClientHook>> ReaderArena::extractCap(uint index) {
111
  if (index < capTable.size()) {
112
    return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
113
  } else {
114
    return nullptr;
115
  }
116 117
}

// =======================================================================================

120
BuilderArena::BuilderArena(MessageBuilder* message)
121
    : message(message), segment0(nullptr, SegmentId(0), nullptr, nullptr) {}
122
BuilderArena::~BuilderArena() noexcept(false) {}
123

124
SegmentBuilder* BuilderArena::getSegment(SegmentId id) {
125
  // This method is allowed to fail if the segment ID is not valid.
126 127 128
  if (id == SegmentId(0)) {
    return &segment0;
  } else {
129
    KJ_IF_MAYBE(s, moreSegments) {
130
      KJ_REQUIRE(id.value - 1 < s->get()->builders.size(), "invalid segment id", id.value);
131
      return const_cast<SegmentBuilder*>(s->get()->builders[id.value - 1].get());
132 133 134
    } else {
      KJ_FAIL_REQUIRE("invalid segment id", id.value);
    }
135
  }
136 137
}

138
BuilderArena::AllocateResult BuilderArena::allocate(WordCount amount) {
139
  if (segment0.getArena() == nullptr) {
140
    // We're allocating the first segment.
141
    kj::ArrayPtr<word> ptr = message->allocateSegment(amount / WORDS);
142 143 144

    // Re-allocate segment0 in-place.  This is a bit of a hack, but we have not returned any
    // pointers to this segment yet, so it should be fine.
145 146
    kj::dtor(segment0);
    kj::ctor(segment0, this, SegmentId(0), ptr, &this->dummyLimiter);
147 148

    segmentWithSpace = &segment0;
149
    return AllocateResult { &segment0, segment0.allocate(amount) };
150
  } else {
151 152
    if (segmentWithSpace != nullptr) {
      // Check if there is space in an existing segment.
153 154 155 156 157 158
      // TODO(perf):  Check for available space in more than just the last segment.  We don't
      //   want this to be O(n), though, so we'll need to maintain some sort of table.  Complicating
      //   matters, we want SegmentBuilders::allocate() to be fast, so we can't update any such
      //   table when allocation actually happens.  Instead, we could have a priority queue based
      //   on the last-known available size, and then re-check the size when we pop segments off it
      //   and shove them to the back of the queue if they have become too small.
159
      word* attempt = segmentWithSpace->allocate(amount);
160
      if (attempt != nullptr) {
161
        return AllocateResult { segmentWithSpace, attempt };
162 163 164
      }
    }

165 166
    // Need to allocate a new segment.
    SegmentBuilder* result = addSegmentInternal(message->allocateSegment(amount / WORDS));
167

168 169
    // Check this new segment first the next time we need to allocate.
    segmentWithSpace = result;
170

171
    // Allocating from the new segment is guaranteed to succeed since we made it big enough.
172
    return AllocateResult { result, result->allocate(amount) };
173 174 175
  }
}

176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207
SegmentBuilder* BuilderArena::addExternalSegment(kj::ArrayPtr<const word> content) {
  return addSegmentInternal(content);
}

template <typename T>
SegmentBuilder* BuilderArena::addSegmentInternal(kj::ArrayPtr<T> content) {
  // Wrap `content` in a new SegmentBuilder, register it as the next segment of the message,
  // and return it.  T is either `word` (a writable segment we allocated ourselves) or
  // `const word` (an external read-only segment).
  //
  // This check should never fail in practice, since you can't get an Orphanage without allocating
  // the root segment.
  KJ_REQUIRE(segment0.getArena() != nullptr,
      "Can't allocate external segments before allocating the root segment.");

  MultiSegmentState* segmentState;
  KJ_IF_MAYBE(s, moreSegments) {
    segmentState = *s;
  } else {
    // First non-root segment: lazily create the state tracking segments 1..N.
    auto newSegmentState = kj::heap<MultiSegmentState>();
    segmentState = newSegmentState;
    moreSegments = kj::mv(newSegmentState);
  }

  // Segment IDs are 1-based relative to the builders vector (segment 0 lives inline).
  kj::Own<SegmentBuilder> newBuilder = kj::heap<SegmentBuilder>(
      this, SegmentId(segmentState->builders.size() + 1), content, &this->dummyLimiter);
  SegmentBuilder* result = newBuilder.get();
  segmentState->builders.add(kj::mv(newBuilder));

  // Keep forOutput the right size so that we don't have to re-allocate during
  // getSegmentsForOutput(), which callers might reasonably expect is a thread-safe method.
  segmentState->forOutput.resize(segmentState->builders.size() + 1);

  return result;
}

208
kj::ArrayPtr<const kj::ArrayPtr<const word>> BuilderArena::getSegmentsForOutput() {
209 210 211 212
  // Although this is a read-only method, we shouldn't need to lock a mutex here because if this
  // is called multiple times simultaneously, we should only be overwriting the array with the
  // exact same data.  If the number or size of segments is actually changing due to an activity
  // in another thread, then the caller has a problem regardless of locking here.
213

214
  KJ_IF_MAYBE(segmentState, moreSegments) {
215
    KJ_DASSERT(segmentState->get()->forOutput.size() == segmentState->get()->builders.size() + 1,
216
        "segmentState->forOutput wasn't resized correctly when the last builder was added.",
217
        segmentState->get()->forOutput.size(), segmentState->get()->builders.size());
218 219

    kj::ArrayPtr<kj::ArrayPtr<const word>> result(
220
        &segmentState->get()->forOutput[0], segmentState->get()->forOutput.size());
221 222
    uint i = 0;
    result[i++] = segment0.currentlyAllocated();
223
    for (auto& builder: segmentState->get()->builders) {
224 225 226 227
      result[i++] = builder->currentlyAllocated();
    }
    return result;
  } else {
228 229 230 231 232 233
    if (segment0.getArena() == nullptr) {
      // We haven't actually allocated any segments yet.
      return nullptr;
    } else {
      // We have only one segment so far.
      segment0ForOutput = segment0.currentlyAllocated();
234
      return kj::arrayPtr(&segment0ForOutput, 1);
235
    }
236 237 238
  }
}

239
SegmentReader* BuilderArena::tryGetSegment(SegmentId id) {
240 241 242 243 244 245 246
  if (id == SegmentId(0)) {
    if (segment0.getArena() == nullptr) {
      // We haven't allocated any segments yet.
      return nullptr;
    } else {
      return &segment0;
    }
247
  } else {
248
    KJ_IF_MAYBE(segmentState, moreSegments) {
249
      if (id.value <= segmentState->get()->builders.size()) {
250 251 252
        // TODO(cleanup):  Return a const SegmentReader and tediously constify all SegmentBuilder
        //   pointers throughout the codebase.
        return const_cast<SegmentReader*>(kj::implicitCast<const SegmentReader*>(
253
            segmentState->get()->builders[id.value - 1].get()));
254
      }
255
    }
256
    return nullptr;
257 258 259
  }
}

260
void BuilderArena::reportReadLimitReached() {
261
  KJ_FAIL_ASSERT("Read limit reached for BuilderArena, but it should have been unlimited.") {
262 263
    return;
  }
264 265
}

266
kj::Maybe<kj::Own<ClientHook>> BuilderArena::extractCap(uint index) {
267
  if (index < capTable.size()) {
268
    return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
269
  } else {
270
    return nullptr;
271
  }
272 273
}

274
uint BuilderArena::injectCap(kj::Own<ClientHook>&& cap) {
275 276 277 278 279
  // TODO(perf):  Detect if the cap is already on the table and reuse the index?  Perhaps this
  //   doesn't happen enough to be worth the effort.
  uint result = capTable.size();
  capTable.add(kj::mv(cap));
  return result;
280 281
}

282
void BuilderArena::dropCap(uint index) {
283 284 285 286
  KJ_ASSERT(index < capTable.size(), "Invalid capability descriptor in message.") {
    return;
  }
  capTable[index] = nullptr;
287 288
}

}  // namespace _ (private)
}  // namespace capnp