// Copyright (c) 2013, Kenton Varda <temporal@gmail.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#define CAPNP_PRIVATE
#include "arena.h"
#include "message.h"
#include "capability.h"
#include <kj/debug.h>
#include <kj/refcount.h>
#include <vector>
#include <string.h>
#include <stdio.h>

namespace capnp {
namespace _ {  // private

// Out-of-line destructor definition for the abstract base class.  noexcept(false) explicitly
// permits throwing destructors, matching the kj convention used throughout this file.
Arena::~Arena() noexcept(false) {}

39 40 41
void ReadLimiter::unread(WordCount64 amount) {
  // Be careful not to overflow here.  Since ReadLimiter has no thread-safety, it's possible that
  // the limit value was not updated correctly for one or more reads, and therefore unread() could
David Renshaw's avatar
David Renshaw committed
42
  // overflow it even if it is only unreading bytes that were actually read.
43 44 45
  uint64_t oldValue = limit;
  uint64_t newValue = oldValue + amount / WORDS;
  if (newValue > oldValue) {
46 47 48 49
    limit = newValue;
  }
}

// =======================================================================================

// Construct an arena over `message`, using the reader options' traversal limit as the read
// budget.  Segment zero is fetched up front and cached inline in the arena.
ReaderArena::ReaderArena(MessageReader* message)
    : message(message),
      readLimiter(message->getOptions().traversalLimitInWords * WORDS),
      segment0(this, SegmentId(0), message->getSegment(0), &readLimiter) {}

57
ReaderArena::~ReaderArena() noexcept(false) {}
58

59
SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
60 61 62 63 64 65 66 67
  if (id == SegmentId(0)) {
    if (segment0.getArray() == nullptr) {
      return nullptr;
    } else {
      return &segment0;
    }
  }

68
  auto lock = moreSegments.lockExclusive();
69

70
  SegmentMap* segments = nullptr;
71
  KJ_IF_MAYBE(s, *lock) {
72 73
    auto iter = s->get()->find(id.value);
    if (iter != s->get()->end()) {
74
      return iter->second;
75
    }
76
    segments = *s;
77 78
  }

79
  kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
80
  if (newSegment == nullptr) {
81 82
    return nullptr;
  }
83

84
  if (*lock == nullptr) {
85
    // OK, the segment exists, so allocate the map.
86 87
    auto s = kj::heap<SegmentMap>();
    segments = s;
88
    *lock = kj::mv(s);
89 90
  }

91 92 93 94
  auto segment = kj::heap<SegmentReader>(this, id, newSegment, &readLimiter);
  SegmentReader* result = segment;
  segments->insert(std::make_pair(id.value, mv(segment)));
  return result;
95 96
}

97
void ReaderArena::reportReadLimitReached() {
98
  KJ_FAIL_REQUIRE("Exceeded message traversal limit.  See capnp::ReaderOptions.") {
99 100
    return;
  }
101 102
}

103
kj::Maybe<kj::Own<ClientHook>> ReaderArena::extractCap(uint index) {
104
  if (index < capTable.size()) {
105
    return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
106
  } else {
107
    return nullptr;
108
  }
109 110
}

// =======================================================================================

// segment0 starts out null (null arena pointer); it is reconstructed in place by the first
// call to allocate().
BuilderArena::BuilderArena(MessageBuilder* message)
    : message(message), segment0(nullptr, SegmentId(0), nullptr, nullptr) {}
// Defined out-of-line; noexcept(false) permits throwing destructors (kj convention).
BuilderArena::~BuilderArena() noexcept(false) {}

117
SegmentBuilder* BuilderArena::getSegment(SegmentId id) {
118
  // This method is allowed to fail if the segment ID is not valid.
119 120 121
  if (id == SegmentId(0)) {
    return &segment0;
  } else {
122
    KJ_IF_MAYBE(s, moreSegments) {
123
      KJ_REQUIRE(id.value - 1 < s->get()->builders.size(), "invalid segment id", id.value);
124
      return const_cast<SegmentBuilder*>(s->get()->builders[id.value - 1].get());
125 126 127
    } else {
      KJ_FAIL_REQUIRE("invalid segment id", id.value);
    }
128
  }
129 130
}

131
BuilderArena::AllocateResult BuilderArena::allocate(WordCount amount) {
132
  if (segment0.getArena() == nullptr) {
133
    // We're allocating the first segment.
134
    kj::ArrayPtr<word> ptr = message->allocateSegment(amount / WORDS);
135 136 137

    // Re-allocate segment0 in-place.  This is a bit of a hack, but we have not returned any
    // pointers to this segment yet, so it should be fine.
138 139 140
    kj::dtor(segment0);
    kj::ctor(segment0, this, SegmentId(0), ptr, &this->dummyLimiter);
    return AllocateResult { &segment0, segment0.allocate(amount) };
141
  } else {
142
    // Check if there is space in the first segment.
143 144 145
    word* attempt = segment0.allocate(amount);
    if (attempt != nullptr) {
      return AllocateResult { &segment0, attempt };
146 147
    }

148 149
    // Need to fall back to additional segments.

150
    MultiSegmentState* segmentState;
151
    KJ_IF_MAYBE(s, moreSegments) {
152 153 154 155 156 157
      // TODO(perf):  Check for available space in more than just the last segment.  We don't
      //   want this to be O(n), though, so we'll need to maintain some sort of table.  Complicating
      //   matters, we want SegmentBuilders::allocate() to be fast, so we can't update any such
      //   table when allocation actually happens.  Instead, we could have a priority queue based
      //   on the last-known available size, and then re-check the size when we pop segments off it
      //   and shove them to the back of the queue if they have become too small.
158

159
      attempt = s->get()->builders.back()->allocate(amount);
160
      if (attempt != nullptr) {
161
        return AllocateResult { s->get()->builders.back().get(), attempt };
162
      }
163
      segmentState = *s;
164 165 166
    } else {
      auto newSegmentState = kj::heap<MultiSegmentState>();
      segmentState = newSegmentState;
167
      moreSegments = kj::mv(newSegmentState);
168 169
    }

170
    kj::Own<SegmentBuilder> newBuilder = kj::heap<SegmentBuilder>(
171
        this, SegmentId(segmentState->builders.size() + 1),
172
        message->allocateSegment(amount / WORDS), &this->dummyLimiter);
173
    SegmentBuilder* result = newBuilder.get();
174
    segmentState->builders.add(kj::mv(newBuilder));
175 176 177

    // Keep forOutput the right size so that we don't have to re-allocate during
    // getSegmentsForOutput(), which callers might reasonably expect is a thread-safe method.
178
    segmentState->forOutput.resize(segmentState->builders.size() + 1);
179

180
    // Allocating from the new segment is guaranteed to succeed since we made it big enough.
181
    return AllocateResult { result, result->allocate(amount) };
182 183 184
  }
}

185
kj::ArrayPtr<const kj::ArrayPtr<const word>> BuilderArena::getSegmentsForOutput() {
186 187 188 189
  // Although this is a read-only method, we shouldn't need to lock a mutex here because if this
  // is called multiple times simultaneously, we should only be overwriting the array with the
  // exact same data.  If the number or size of segments is actually changing due to an activity
  // in another thread, then the caller has a problem regardless of locking here.
190

191
  KJ_IF_MAYBE(segmentState, moreSegments) {
192
    KJ_DASSERT(segmentState->get()->forOutput.size() == segmentState->get()->builders.size() + 1,
193
        "segmentState->forOutput wasn't resized correctly when the last builder was added.",
194
        segmentState->get()->forOutput.size(), segmentState->get()->builders.size());
195 196

    kj::ArrayPtr<kj::ArrayPtr<const word>> result(
197
        &segmentState->get()->forOutput[0], segmentState->get()->forOutput.size());
198 199
    uint i = 0;
    result[i++] = segment0.currentlyAllocated();
200
    for (auto& builder: segmentState->get()->builders) {
201 202 203 204
      result[i++] = builder->currentlyAllocated();
    }
    return result;
  } else {
205 206 207 208 209 210
    if (segment0.getArena() == nullptr) {
      // We haven't actually allocated any segments yet.
      return nullptr;
    } else {
      // We have only one segment so far.
      segment0ForOutput = segment0.currentlyAllocated();
211
      return kj::arrayPtr(&segment0ForOutput, 1);
212
    }
213 214 215
  }
}

216
SegmentReader* BuilderArena::tryGetSegment(SegmentId id) {
217 218 219 220 221 222 223
  if (id == SegmentId(0)) {
    if (segment0.getArena() == nullptr) {
      // We haven't allocated any segments yet.
      return nullptr;
    } else {
      return &segment0;
    }
224
  } else {
225
    KJ_IF_MAYBE(segmentState, moreSegments) {
226
      if (id.value <= segmentState->get()->builders.size()) {
227 228 229
        // TODO(cleanup):  Return a const SegmentReader and tediously constify all SegmentBuilder
        //   pointers throughout the codebase.
        return const_cast<SegmentReader*>(kj::implicitCast<const SegmentReader*>(
230
            segmentState->get()->builders[id.value - 1].get()));
231
      }
232
    }
233
    return nullptr;
234 235 236
  }
}

237
void BuilderArena::reportReadLimitReached() {
238
  KJ_FAIL_ASSERT("Read limit reached for BuilderArena, but it should have been unlimited.") {
239 240
    return;
  }
241 242
}

243
kj::Maybe<kj::Own<ClientHook>> BuilderArena::extractCap(uint index) {
244
  if (index < capTable.size()) {
245
    return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
246
  } else {
247
    return nullptr;
248
  }
249 250
}

251
uint BuilderArena::injectCap(kj::Own<ClientHook>&& cap) {
252 253 254 255 256
  // TODO(perf):  Detect if the cap is already on the table and reuse the index?  Perhaps this
  //   doesn't happen enough to be worth the effort.
  uint result = capTable.size();
  capTable.add(kj::mv(cap));
  return result;
257 258
}

259
void BuilderArena::dropCap(uint index) {
260 261 262 263
  KJ_ASSERT(index < capTable.size(), "Invalid capability descriptor in message.") {
    return;
  }
  capTable[index] = nullptr;
264 265
}

}  // namespace _ (private)
}  // namespace capnp