Commit b0868e34 authored by Kenton Varda

Replace some STL maps with KJ maps in Cap'n Proto implementation.

So far this is only a small subset of all the STL uses.
parent e443282b
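For context, here is a minimal, hedged sketch of the KJ map interface this commit switches to, limited to what this diff itself demonstrates: find() returning a kj::Maybe instead of an iterator, insert(), and upsert() with a duplicate callback. The map contents and function name are made up for illustration.

```c++
#include <kj/map.h>
#include <kj/string.h>
#include <kj/debug.h>

void kjMapSketch() {
  kj::HashMap<uint, kj::String> map;
  map.insert(1, kj::str("one"));  // plain insert; the key must not already exist

  // find() returns kj::Maybe<Value&> rather than an iterator, so lookups use KJ_IF_MAYBE.
  KJ_IF_MAYBE(value, map.find(1)) {
    KJ_LOG(INFO, *value);         // *value is the stored kj::String
  } else {
    KJ_LOG(INFO, "not found");
  }

  // upsert() inserts, or calls the lambda with the existing and new values on a duplicate key.
  map.upsert(1, kj::str("uno"), [](kj::String& existing, kj::String&& replacement) {
    existing = kj::mv(replacement);
  });
}
```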
@@ -98,11 +98,10 @@ SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
SegmentMap* segments = nullptr;
KJ_IF_MAYBE(s, *lock) {
auto iter = s->get()->find(id.value);
if (iter != s->get()->end()) {
return iter->second;
KJ_IF_MAYBE(segment, s->find(id.value)) {
return *segment;
}
segments = *s;
segments = s;
}
kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
@@ -114,15 +113,13 @@ SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
if (*lock == nullptr) {
// OK, the segment exists, so allocate the map.
auto s = kj::heap<SegmentMap>();
segments = s;
*lock = kj::mv(s);
segments = &lock->emplace();
}
auto segment = kj::heap<SegmentReader>(
this, id, newSegment.begin(), newSegmentSize, &readLimiter);
SegmentReader* result = segment;
segments->insert(std::make_pair(id.value, mv(segment)));
segments->insert(id.value, kj::mv(segment));
return result;
}
@@ -37,7 +37,7 @@
#include "common.h"
#include "message.h"
#include "layout.h"
#include <unordered_map>
#include <kj/map.h>
#if !CAPNP_LITE
#include "capability.h"
@@ -241,8 +241,8 @@ private:
// Optimize for single-segment messages so that small messages are handled quickly.
SegmentReader segment0;
typedef std::unordered_map<uint, kj::Own<SegmentReader>> SegmentMap;
kj::MutexGuarded<kj::Maybe<kj::Own<SegmentMap>>> moreSegments;
typedef kj::HashMap<uint, kj::Own<SegmentReader>> SegmentMap;
kj::MutexGuarded<kj::Maybe<SegmentMap>> moreSegments;
// We need to mutex-guard the segment map because we lazily initialize segments when they are
// first requested, but a Reader is allowed to be used concurrently in multiple threads. Luckily
// this only applies to large messages.
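For illustration, a rough sketch (hypothetical class and field names) of the lazily-initialized, mutex-guarded map pattern used above, where the map is constructed in place inside the kj::Maybe only once it is first needed:

```c++
#include <kj/map.h>
#include <kj/mutex.h>

class LazySegmentIndex {
  // Hypothetical analogue of moreSegments: no map is allocated until a lookup misses.
  kj::MutexGuarded<kj::Maybe<kj::HashMap<uint, int>>> entries;

public:
  int getOrInsert(uint id, int fallback) {
    auto lock = entries.lockExclusive();

    kj::HashMap<uint, int>* map = nullptr;
    KJ_IF_MAYBE(m, *lock) {
      KJ_IF_MAYBE(existing, m->find(id)) {
        return *existing;              // fast path: already present
      }
      map = m;
    }
    if (map == nullptr) {
      map = &lock->emplace();          // first use: construct the map inside the Maybe
    }
    map->insert(id, fallback);
    return fallback;
  }
};
```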
@@ -46,6 +46,7 @@
#include <capnp/compat/json.h>
#include <errno.h>
#include <stdlib.h>
#include <kj/map.h>
#if _WIN32
#include <process.h>
@@ -1767,17 +1768,16 @@ private:
// require to function.
struct SourceDirectory {
kj::Path path;
kj::Own<const kj::ReadableDirectory> dir;
bool isSourcePrefix;
};
std::map<kj::PathPtr, SourceDirectory> sourceDirectories;
kj::HashMap<kj::Path, SourceDirectory> sourceDirectories;
// For each import path and source prefix, tracks the directory object we opened for it.
//
// Use via getSourceDirectory().
std::map<const kj::ReadableDirectory*, kj::String> dirPrefixes;
kj::HashMap<const kj::ReadableDirectory*, kj::String> dirPrefixes;
// For each open directory object, maps to a path prefix to add when displaying this path in
// error messages. This keeps track of the original directory name as given by the user, before
// canonicalization.
@@ -1827,10 +1827,9 @@ private:
if (path.size() == 0) return disk->getRoot();
auto iter = sourceDirectories.find(path);
if (iter != sourceDirectories.end()) {
iter->second.isSourcePrefix = iter->second.isSourcePrefix || isSourcePrefix;
return *iter->second.dir;
KJ_IF_MAYBE(sdir, sourceDirectories.find(path)) {
sdir->isSourcePrefix = sdir->isSourcePrefix || isSourcePrefix;
return *sdir->dir;
}
if (path == cwd) {
@@ -1843,26 +1842,22 @@ private:
// getDisplayName().
auto& result = disk->getCurrent();
if (isSourcePrefix) {
kj::PathPtr key = path;
kj::Own<const kj::ReadableDirectory> fakeOwn(&result, kj::NullDisposer::instance);
KJ_ASSERT(sourceDirectories.insert(std::make_pair(key,
SourceDirectory { kj::mv(path), kj::mv(fakeOwn), isSourcePrefix })).second);
sourceDirectories.insert(kj::mv(path), { kj::mv(fakeOwn), isSourcePrefix });
}
return result;
}
KJ_IF_MAYBE(dir, disk->getRoot().tryOpenSubdir(path)) {
auto& result = *dir->get();
kj::PathPtr key = path;
KJ_ASSERT(sourceDirectories.insert(std::make_pair(key,
SourceDirectory { kj::mv(path), kj::mv(*dir), isSourcePrefix })).second);
sourceDirectories.insert(kj::mv(path), { kj::mv(*dir), isSourcePrefix });
#if _WIN32
kj::String prefix = pathStr.endsWith("/") || pathStr.endsWith("\\")
? kj::str(pathStr) : kj::str(pathStr, '\\');
#else
kj::String prefix = pathStr.endsWith("/") ? kj::str(pathStr) : kj::str(pathStr, '/');
#endif
KJ_ASSERT(dirPrefixes.insert(std::make_pair(&result, kj::mv(prefix))).second);
dirPrefixes.insert(&result, kj::mv(prefix));
return result;
} else {
return nullptr;
@@ -1883,9 +1878,8 @@ private:
auto prefix = path.slice(0, i);
auto remainder = path.slice(i, path.size());
auto iter = sourceDirectories.find(prefix);
if (iter != sourceDirectories.end() && iter->second.isSourcePrefix) {
return { *iter->second.dir, remainder.clone() };
KJ_IF_MAYBE(sdir, sourceDirectories.find(prefix)) {
return { *sdir->dir, remainder.clone() };
}
}
@@ -1914,9 +1908,8 @@ private:
}
kj::String getDisplayName(const kj::ReadableDirectory& dir, kj::PathPtr path) {
auto iter = dirPrefixes.find(&dir);
if (iter != dirPrefixes.end()) {
return kj::str(iter->second, path.toNativeString());
KJ_IF_MAYBE(prefix, dirPrefixes.find(&dir)) {
return kj::str(*prefix, path.toNativeString());
} else if (&dir == &disk->getRoot()) {
return path.toNativeString(true);
} else if (&dir == &disk->getCurrent()) {
@@ -126,9 +126,9 @@ private:
// Space in which we can construct a ReaderArena. We don't use ReaderArena directly here
// because we don't want clients to have to #include arena.h, which itself includes a bunch of
// big STL headers. We don't use a pointer to a ReaderArena because that would require an
// other headers. We don't use a pointer to a ReaderArena because that would require an
// extra malloc on every message which could be expensive when processing small messages.
void* arenaSpace[15 + sizeof(kj::MutexGuarded<void*>) / sizeof(void*)];
void* arenaSpace[17 + sizeof(kj::MutexGuarded<void*>) / sizeof(void*)];
bool allocatedArena;
_::ReaderArena* arena() { return reinterpret_cast<_::ReaderArena*>(arenaSpace); }
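The word count above is a hand-maintained size estimate for the real ReaderArena (it grows from 15 to 17 because the HashMap now lives inline inside the Maybe rather than behind an Own pointer). Below is a hedged sketch of this reserve-raw-space pattern with hypothetical names; presumably the implementation file verifies the guess once the type is complete.

```c++
#include <kj/mutex.h>
#include <new>  // placement new

struct BigInternalType;  // only forward-declared in the header

struct Handle {
  // Enough raw words for BigInternalType, guessed the same way as arenaSpace above.
  void* space[17 + sizeof(kj::MutexGuarded<void*>) / sizeof(void*)];
  BigInternalType* get() { return reinterpret_cast<BigInternalType*>(space); }
};

// In the implementation file, once BigInternalType is a complete type:
//   static_assert(sizeof(BigInternalType) <= sizeof(Handle::space), "bump the word count");
//   new (handle.space) BigInternalType(...);  // construct in place, avoiding a per-message malloc
```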
@@ -21,9 +21,6 @@
#define CAPNP_PRIVATE
#include "schema-loader.h"
#include <unordered_map>
#include <unordered_set>
#include <map>
#include "message.h"
#include "arena.h"
#include <kj/debug.h>
@@ -31,6 +28,7 @@
#include <kj/arena.h>
#include <kj/vector.h>
#include <algorithm>
#include <kj/map.h>
#if _MSC_VER
#include <atomic>
@@ -40,27 +38,6 @@ namespace capnp {
namespace {
struct ByteArrayHash {
size_t operator()(kj::ArrayPtr<const byte> bytes) const {
// FNV hash. Probably sucks, but the code is simple.
//
// TODO(perf): Add CityHash or something to KJ and use it here.
uint64_t hash = 0xcbf29ce484222325ull;
for (byte b: bytes) {
hash = hash * 0x100000001b3ull;
hash ^= b;
}
return hash;
}
};
struct ByteArrayEq {
bool operator()(kj::ArrayPtr<const byte> a, kj::ArrayPtr<const byte> b) const {
return a.size() == b.size() && memcmp(a.begin(), b.begin(), a.size()) == 0;
}
};
struct SchemaBindingsPair {
const _::RawSchema* schema;
const _::RawBrandedSchema::Scope* scopeBindings;
@@ -68,12 +45,8 @@ struct SchemaBindingsPair {
inline bool operator==(const SchemaBindingsPair& other) const {
return schema == other.schema && scopeBindings == other.scopeBindings;
}
};
struct SchemaBindingsPairHash {
size_t operator()(SchemaBindingsPair pair) const {
return 31 * reinterpret_cast<uintptr_t>(pair.schema) +
reinterpret_cast<uintptr_t>(pair.scopeBindings);
inline uint hashCode() const {
return kj::hashCode(schema, scopeBindings);
}
};
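SchemaBindingsPair shows what kj::HashMap needs from a custom key type: operator== plus a hashCode() method (here built with kj::hashCode()). A minimal sketch with a hypothetical key type:

```c++
#include <kj/map.h>
#include <kj/hash.h>
#include <stdint.h>

struct NodeKey {
  // Hypothetical composite key; only operator== and hashCode() are required by kj::HashMap.
  uint64_t scopeId;
  uint16_t ordinal;

  inline bool operator==(const NodeKey& other) const {
    return scopeId == other.scopeId && ordinal == other.ordinal;
  }
  inline uint hashCode() const {
    return kj::hashCode(scopeId, ordinal);  // combine both fields, like SchemaBindingsPair above
  }
};

kj::HashMap<NodeKey, int> nodeTable;  // now usable directly as a map key
```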
@@ -150,19 +123,19 @@ public:
kj::Arena arena;
private:
std::unordered_set<kj::ArrayPtr<const byte>, ByteArrayHash, ByteArrayEq> dedupTable;
kj::HashSet<kj::ArrayPtr<const byte>> dedupTable;
// Records raw segments of memory in the arena against which we may want to de-dupe later
// additions. Specifically, RawBrandedSchema binding tables are de-duped.
std::unordered_map<uint64_t, _::RawSchema*> schemas;
std::unordered_map<SchemaBindingsPair, _::RawBrandedSchema*, SchemaBindingsPairHash> brands;
std::unordered_map<const _::RawSchema*, _::RawBrandedSchema*> unboundBrands;
kj::HashMap<uint64_t, _::RawSchema*> schemas;
kj::HashMap<SchemaBindingsPair, _::RawBrandedSchema*> brands;
kj::HashMap<const _::RawSchema*, _::RawBrandedSchema*> unboundBrands;
struct RequiredSize {
uint16_t dataWordCount;
uint16_t pointerCount;
};
std::unordered_map<uint64_t, RequiredSize> structSizeRequirements;
kj::HashMap<uint64_t, RequiredSize> structSizeRequirements;
InitializerImpl initializer;
BrandedInitializerImpl brandedInitializer;
@@ -285,7 +258,7 @@ public:
loader.arena.allocateArray<const _::RawSchema*>(*count);
uint pos = 0;
for (auto& dep: dependencies) {
result[pos++] = dep.second;
result[pos++] = dep.value;
}
KJ_DASSERT(pos == *count);
return result.begin();
@@ -296,7 +269,7 @@ public:
kj::ArrayPtr<uint16_t> result = loader.arena.allocateArray<uint16_t>(*count);
uint pos = 0;
for (auto& member: members) {
result[pos++] = member.second;
result[pos++] = member.value;
}
KJ_DASSERT(pos == *count);
return result.begin();
@@ -310,10 +283,14 @@ private:
SchemaLoader::Impl& loader;
Text::Reader nodeName;
bool isValid;
std::map<uint64_t, _::RawSchema*> dependencies;
kj::TreeMap<uint64_t, _::RawSchema*> dependencies;
// Maps type IDs -> compiled schemas for each dependency.
// Order is important because makeDependencyArray() compiles a sorted array.
std::map<Text::Reader, uint> members;
kj::TreeMap<Text::Reader, uint> members;
// Maps name -> index for each member.
// Order is important because makeMemberInfoArray() compiles a sorted array.
kj::ArrayPtr<uint16_t> membersByDiscriminant;
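kj::TreeMap exposes the same interface as kj::HashMap but keeps entries sorted by key, which is why it stands in for std::map here, where makeDependencyArray() and makeMemberInfoArray() rely on sorted iteration. A small hedged sketch of that ordering, using the entry fields key/value seen elsewhere in this diff:

```c++
#include <kj/map.h>
#include <kj/debug.h>

void dumpSorted() {
  kj::TreeMap<uint64_t, int> deps;
  deps.insert(30, 2);
  deps.insert(10, 0);
  deps.insert(20, 1);

  // Visits keys in ascending order: 10, 20, 30.
  for (auto& entry: deps) {
    KJ_LOG(INFO, entry.key, entry.value);
  }
}
```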
@@ -323,8 +300,9 @@ private:
KJ_FAIL_REQUIRE(__VA_ARGS__) { isValid = false; return; }
void validateMemberName(kj::StringPtr name, uint index) {
bool isNewName = members.insert(std::make_pair(name, index)).second;
VALIDATE_SCHEMA(isNewName, "duplicate name", name);
members.upsert(name, index, [&](auto&, auto&&) {
FAIL_VALIDATE_SCHEMA("duplicate name", name);
});
}
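upsert() above only invokes its callback when the key is already present, which turns the old insert-and-check-`.second` idiom into a duplicate report. A short hedged sketch of the same idea with a hypothetical helper:

```c++
#include <kj/map.h>
#include <kj/string.h>
#include <kj/debug.h>

void addMember(kj::HashMap<kj::StringPtr, uint>& members, kj::StringPtr name, uint index) {
  // The lambda runs only if `name` was already in the map.
  members.upsert(name, index, [&](uint& existing, uint&& newValue) {
    KJ_LOG(ERROR, "duplicate member name", name, existing, newValue);
  });
}
```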
void validate(const schema::Node::Struct::Reader& structNode, uint64_t scopeId) {
@@ -625,12 +603,13 @@ private:
VALIDATE_SCHEMA(node.which() == expectedKind,
"expected a different kind of node for this ID",
id, (uint)expectedKind, (uint)node.which(), node.getDisplayName());
dependencies.insert(std::make_pair(id, existing));
dependencies.upsert(id, existing, [](auto&,auto&&) { /* ignore dupe */ });
return;
}
dependencies.insert(std::make_pair(id, loader.loadEmpty(
id, kj::str("(unknown type used by ", nodeName , ")"), expectedKind, true)));
dependencies.upsert(id, loader.loadEmpty(
id, kj::str("(unknown type used by ", nodeName , ")"), expectedKind, true),
[](auto&,auto&&) { /* ignore dupe */ });
}
#undef VALIDATE_SCHEMA
@@ -1263,49 +1242,52 @@ _::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool
}
// Check if we already have a schema for this ID.
_::RawSchema*& slot = schemas[validatedReader.getId()];
_::RawSchema* schema;
bool shouldReplace;
bool shouldClearInitializer;
if (slot == nullptr) {
// Nope, allocate a new RawSchema.
slot = &arena.allocate<_::RawSchema>();
memset(&slot->defaultBrand, 0, sizeof(slot->defaultBrand));
slot->id = validatedReader.getId();
slot->canCastTo = nullptr;
slot->defaultBrand.generic = slot;
slot->lazyInitializer = isPlaceholder ? &initializer : nullptr;
slot->defaultBrand.lazyInitializer = isPlaceholder ? &brandedInitializer : nullptr;
shouldReplace = true;
shouldClearInitializer = false;
} else {
KJ_IF_MAYBE(match, schemas.find(validatedReader.getId())) {
// Yes, check if it is compatible and figure out which schema is newer.
// If the existing slot is a placeholder, but we're upgrading it to a non-placeholder, we
schema = *match;
// If the existing schema is a placeholder, but we're upgrading it to a non-placeholder, we
// need to clear the initializer later.
shouldClearInitializer = slot->lazyInitializer != nullptr && !isPlaceholder;
shouldClearInitializer = schema->lazyInitializer != nullptr && !isPlaceholder;
auto existing = readMessageUnchecked<schema::Node>(slot->encodedNode);
auto existing = readMessageUnchecked<schema::Node>(schema->encodedNode);
CompatibilityChecker checker(*this);
// Prefer to replace the existing schema if the existing schema is a placeholder. Otherwise,
// prefer to keep the existing schema.
shouldReplace = checker.shouldReplace(
existing, validatedReader, slot->lazyInitializer != nullptr);
existing, validatedReader, schema->lazyInitializer != nullptr);
} else {
// Nope, allocate a new RawSchema.
schema = &arena.allocate<_::RawSchema>();
memset(&schema->defaultBrand, 0, sizeof(schema->defaultBrand));
schema->id = validatedReader.getId();
schema->canCastTo = nullptr;
schema->defaultBrand.generic = schema;
schema->lazyInitializer = isPlaceholder ? &initializer : nullptr;
schema->defaultBrand.lazyInitializer = isPlaceholder ? &brandedInitializer : nullptr;
shouldReplace = true;
shouldClearInitializer = false;
schemas.insert(validatedReader.getId(), schema);
}
if (shouldReplace) {
// Initialize the RawSchema.
slot->encodedNode = validated.begin();
slot->encodedSize = validated.size();
slot->dependencies = validator.makeDependencyArray(&slot->dependencyCount);
slot->membersByName = validator.makeMemberInfoArray(&slot->memberCount);
slot->membersByDiscriminant = validator.makeMembersByDiscriminantArray();
schema->encodedNode = validated.begin();
schema->encodedSize = validated.size();
schema->dependencies = validator.makeDependencyArray(&schema->dependencyCount);
schema->membersByName = validator.makeMemberInfoArray(&schema->memberCount);
schema->membersByDiscriminant = validator.makeMembersByDiscriminantArray();
// Even though this schema isn't itself branded, it may have dependencies that are. So, we
// need to set up the "dependencies" map under defaultBrand.
auto deps = makeBrandedDependencies(slot, kj::ArrayPtr<const _::RawBrandedSchema::Scope>());
slot->defaultBrand.dependencies = deps.begin();
slot->defaultBrand.dependencyCount = deps.size();
auto deps = makeBrandedDependencies(schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope>());
schema->defaultBrand.dependencies = deps.begin();
schema->defaultBrand.dependencyCount = deps.size();
}
if (shouldClearInitializer) {
@@ -1313,94 +1295,91 @@ _::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool
// dependency list of other schemas. Once the initializer is null, it is live, so we must do
// a release-store here.
#if __GNUC__
__atomic_store_n(&slot->lazyInitializer, nullptr, __ATOMIC_RELEASE);
__atomic_store_n(&slot->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
__atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE);
__atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
#elif _MSC_VER
std::atomic_thread_fence(std::memory_order_release);
*static_cast<_::RawSchema::Initializer const* volatile*>(&slot->lazyInitializer) = nullptr;
*static_cast<_::RawSchema::Initializer const* volatile*>(&schema->lazyInitializer) = nullptr;
*static_cast<_::RawBrandedSchema::Initializer const* volatile*>(
&slot->defaultBrand.lazyInitializer) = nullptr;
&schema->defaultBrand.lazyInitializer) = nullptr;
#else
#error "Platform not supported"
#endif
}
return slot;
return schema;
}
_::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) {
_::RawSchema*& slot = schemas[nativeSchema->id];
_::RawSchema* schema;
bool shouldReplace;
bool shouldClearInitializer;
if (slot == nullptr) {
slot = &arena.allocate<_::RawSchema>();
memset(&slot->defaultBrand, 0, sizeof(slot->defaultBrand));
slot->defaultBrand.generic = slot;
slot->lazyInitializer = nullptr;
slot->defaultBrand.lazyInitializer = nullptr;
KJ_IF_MAYBE(match, schemas.find(nativeSchema->id)) {
schema = *match;
if (schema->canCastTo != nullptr) {
// Already loaded natively, or we're currently in the process of loading natively and there
// was a dependency cycle.
KJ_REQUIRE(schema->canCastTo == nativeSchema,
"two different compiled-in type have the same type ID",
nativeSchema->id,
readMessageUnchecked<schema::Node>(nativeSchema->encodedNode).getDisplayName(),
readMessageUnchecked<schema::Node>(schema->canCastTo->encodedNode).getDisplayName());
return schema;
} else {
auto existing = readMessageUnchecked<schema::Node>(schema->encodedNode);
auto native = readMessageUnchecked<schema::Node>(nativeSchema->encodedNode);
CompatibilityChecker checker(*this);
shouldReplace = checker.shouldReplace(existing, native, true);
shouldClearInitializer = schema->lazyInitializer != nullptr;
}
} else {
schema = &arena.allocate<_::RawSchema>();
memset(&schema->defaultBrand, 0, sizeof(schema->defaultBrand));
schema->defaultBrand.generic = schema;
schema->lazyInitializer = nullptr;
schema->defaultBrand.lazyInitializer = nullptr;
shouldReplace = true;
shouldClearInitializer = false; // already cleared above
} else if (slot->canCastTo != nullptr) {
// Already loaded natively, or we're currently in the process of loading natively and there
// was a dependency cycle.
KJ_REQUIRE(slot->canCastTo == nativeSchema,
"two different compiled-in type have the same type ID",
nativeSchema->id,
readMessageUnchecked<schema::Node>(nativeSchema->encodedNode).getDisplayName(),
readMessageUnchecked<schema::Node>(slot->canCastTo->encodedNode).getDisplayName());
return slot;
} else {
auto existing = readMessageUnchecked<schema::Node>(slot->encodedNode);
auto native = readMessageUnchecked<schema::Node>(nativeSchema->encodedNode);
CompatibilityChecker checker(*this);
shouldReplace = checker.shouldReplace(existing, native, true);
shouldClearInitializer = slot->lazyInitializer != nullptr;
schemas.insert(nativeSchema->id, schema);
}
// Since we recurse below, the slot in the hash map could move around. Copy out the pointer
// for subsequent use.
// TODO(cleanup): Above comment is actually not true of unordered_map. Leaving here to explain
// code pattern below.
_::RawSchema* result = slot;
if (shouldReplace) {
// Set the schema to a copy of the native schema, but make sure not to null out lazyInitializer
// yet.
_::RawSchema temp = *nativeSchema;
temp.lazyInitializer = result->lazyInitializer;
*result = temp;
temp.lazyInitializer = schema->lazyInitializer;
*schema = temp;
result->defaultBrand.generic = result;
schema->defaultBrand.generic = schema;
// Indicate that casting is safe. Note that it's important to set this before recursively
// loading dependencies, so that cycles don't cause infinite loops!
result->canCastTo = nativeSchema;
schema->canCastTo = nativeSchema;
// We need to set the dependency list to point at other loader-owned RawSchemas.
kj::ArrayPtr<const _::RawSchema*> dependencies =
arena.allocateArray<const _::RawSchema*>(result->dependencyCount);
arena.allocateArray<const _::RawSchema*>(schema->dependencyCount);
for (uint i = 0; i < nativeSchema->dependencyCount; i++) {
dependencies[i] = loadNative(nativeSchema->dependencies[i]);
}
result->dependencies = dependencies.begin();
schema->dependencies = dependencies.begin();
// Also need to re-do the branded dependencies.
auto deps = makeBrandedDependencies(slot, kj::ArrayPtr<const _::RawBrandedSchema::Scope>());
slot->defaultBrand.dependencies = deps.begin();
slot->defaultBrand.dependencyCount = deps.size();
auto deps = makeBrandedDependencies(schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope>());
schema->defaultBrand.dependencies = deps.begin();
schema->defaultBrand.dependencyCount = deps.size();
// If there is a struct size requirement, we need to make sure that it is satisfied.
auto reqIter = structSizeRequirements.find(nativeSchema->id);
if (reqIter != structSizeRequirements.end()) {
applyStructSizeRequirement(result, reqIter->second.dataWordCount,
reqIter->second.pointerCount);
KJ_IF_MAYBE(sizeReq, structSizeRequirements.find(nativeSchema->id)) {
applyStructSizeRequirement(schema, sizeReq->dataWordCount,
sizeReq->pointerCount);
}
} else {
// The existing schema is newer.
// Indicate that casting is safe. Note that it's important to set this before recursively
// loading dependencies, so that cycles don't cause infinite loops!
result->canCastTo = nativeSchema;
schema->canCastTo = nativeSchema;
// Make sure the dependencies are loaded and compatible.
for (uint i = 0; i < nativeSchema->dependencyCount; i++) {
@@ -1413,19 +1392,19 @@ _::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) {
// dependency list of other schemas. Once the initializer is null, it is live, so we must do
// a release-store here.
#if __GNUC__
__atomic_store_n(&result->lazyInitializer, nullptr, __ATOMIC_RELEASE);
__atomic_store_n(&result->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
__atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE);
__atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
#elif _MSC_VER
std::atomic_thread_fence(std::memory_order_release);
*static_cast<_::RawSchema::Initializer const* volatile*>(&result->lazyInitializer) = nullptr;
*static_cast<_::RawSchema::Initializer const* volatile*>(&schema->lazyInitializer) = nullptr;
*static_cast<_::RawBrandedSchema::Initializer const* volatile*>(
&result->defaultBrand.lazyInitializer) = nullptr;
&schema->defaultBrand.lazyInitializer) = nullptr;
#else
#error "Platform not supported"
#endif
}
return result;
return schema;
}
_::RawSchema* SchemaLoader::Impl::loadEmpty(
@@ -1539,20 +1518,20 @@ const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded(
return &schema->defaultBrand;
}
auto& slot = brands[SchemaBindingsPair { schema, bindings.begin() }];
if (slot == nullptr) {
SchemaBindingsPair key { schema, bindings.begin() };
KJ_IF_MAYBE(existing, brands.find(key)) {
return *existing;
} else {
auto& brand = arena.allocate<_::RawBrandedSchema>();
memset(&brand, 0, sizeof(brand));
slot = &brand;
brands.insert(key, &brand);
brand.generic = schema;
brand.scopes = bindings.begin();
brand.scopeCount = bindings.size();
brand.lazyInitializer = &brandedInitializer;
return &brand;
}
return slot;
}
kj::ArrayPtr<const _::RawBrandedSchema::Dependency>
@@ -1783,16 +1762,15 @@ kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<const T> valu
auto bytes = values.asBytes();
auto iter = dedupTable.find(bytes);
if (iter != dedupTable.end()) {
return kj::arrayPtr(reinterpret_cast<const T*>(iter->begin()), values.size());
KJ_IF_MAYBE(dupe, dedupTable.find(bytes)) {
return kj::arrayPtr(reinterpret_cast<const T*>(dupe->begin()), values.size());
}
// Need to make a new copy.
auto copy = arena.allocateArray<T>(values.size());
memcpy(copy.begin(), values.begin(), values.size() * sizeof(T));
KJ_ASSERT(dedupTable.insert(copy.asBytes()).second);
dedupTable.insert(copy.asBytes());
return copy;
}
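kj::HashSet (declared alongside kj::HashMap, per the includes this commit swaps in) follows the same Maybe-returning lookup style. A hedged sketch of the de-dup idea, using kj::StringPtr instead of raw byte ranges:

```c++
#include <kj/map.h>
#include <kj/string.h>

bool addIfNew(kj::HashSet<kj::StringPtr>& seen, kj::StringPtr value) {
  KJ_IF_MAYBE(existing, seen.find(value)) {
    return false;        // already recorded; *existing is the stored element
  }
  seen.insert(value);
  return true;
}
```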
@@ -1803,11 +1781,10 @@ kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<T> values) {
}
SchemaLoader::Impl::TryGetResult SchemaLoader::Impl::tryGet(uint64_t typeId) const {
auto iter = schemas.find(typeId);
if (iter == schemas.end()) {
return {nullptr, initializer.getCallback()};
KJ_IF_MAYBE(schema, schemas.find(typeId)) {
return {*schema, initializer.getCallback()};
} else {
return {iter->second, initializer.getCallback()};
return {nullptr, initializer.getCallback()};
}
}
@@ -1817,43 +1794,45 @@ const _::RawBrandedSchema* SchemaLoader::Impl::getUnbound(const _::RawSchema* sc
return &schema->defaultBrand;
}
auto& slot = unboundBrands[schema];
if (slot == nullptr) {
slot = &arena.allocate<_::RawBrandedSchema>();
KJ_IF_MAYBE(existing, unboundBrands.find(schema)) {
return *existing;
} else {
auto slot = &arena.allocate<_::RawBrandedSchema>();
memset(slot, 0, sizeof(*slot));
slot->generic = schema;
auto deps = makeBrandedDependencies(schema, nullptr);
slot->dependencies = deps.begin();
slot->dependencyCount = deps.size();
unboundBrands.insert(schema, slot);
return slot;
}
return slot;
}
kj::Array<Schema> SchemaLoader::Impl::getAllLoaded() const {
size_t count = 0;
for (auto& schema: schemas) {
if (schema.second->lazyInitializer == nullptr) ++count;
if (schema.value->lazyInitializer == nullptr) ++count;
}
kj::Array<Schema> result = kj::heapArray<Schema>(count);
size_t i = 0;
for (auto& schema: schemas) {
if (schema.second->lazyInitializer == nullptr) {
result[i++] = Schema(&schema.second->defaultBrand);
if (schema.value->lazyInitializer == nullptr) {
result[i++] = Schema(&schema.value->defaultBrand);
}
}
return result;
}
void SchemaLoader::Impl::requireStructSize(uint64_t id, uint dataWordCount, uint pointerCount) {
auto& slot = structSizeRequirements[id];
slot.dataWordCount = kj::max(slot.dataWordCount, dataWordCount);
slot.pointerCount = kj::max(slot.pointerCount, pointerCount);
structSizeRequirements.upsert(id, { uint16_t(dataWordCount), uint16_t(pointerCount) },
[&](RequiredSize& existingValue, RequiredSize&& newValue) {
existingValue.dataWordCount = kj::max(existingValue.dataWordCount, newValue.dataWordCount);
existingValue.pointerCount = kj::max(existingValue.pointerCount, newValue.pointerCount);
});
auto iter = schemas.find(id);
if (iter != schemas.end()) {
applyStructSizeRequirement(iter->second, dataWordCount, pointerCount);
KJ_IF_MAYBE(schema, schemas.find(id)) {
applyStructSizeRequirement(*schema, dataWordCount, pointerCount);
}
}
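Here the upsert() callback merges a new size requirement into the existing one by taking the field-wise max. A compact hedged sketch of that merge behavior with hypothetical types:

```c++
#include <kj/map.h>
#include <kj/common.h>  // kj::max
#include <stdint.h>

struct Sizes { uint16_t dataWords; uint16_t pointers; };

void requireAtLeast(kj::HashMap<uint64_t, Sizes>& reqs, uint64_t id, Sizes wanted) {
  // Insert if absent; otherwise grow the stored requirement field by field.
  reqs.upsert(id, wanted, [](Sizes& existing, Sizes&& newValue) {
    existing.dataWords = kj::max(existing.dataWords, newValue.dataWords);
    existing.pointers  = kj::max(existing.pointers, newValue.pointers);
  });
}
```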
@@ -1868,14 +1847,12 @@ kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNode(schema::Node::Reader no
kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNodeEnforcingSizeRequirements(
schema::Node::Reader node) {
if (node.isStruct()) {
auto iter = structSizeRequirements.find(node.getId());
if (iter != structSizeRequirements.end()) {
auto requirement = iter->second;
KJ_IF_MAYBE(requirement, structSizeRequirements.find(node.getId())) {
auto structNode = node.getStruct();
if (structNode.getDataWordCount() < requirement.dataWordCount ||
structNode.getPointerCount() < requirement.pointerCount) {
return rewriteStructNodeWithSizes(node, requirement.dataWordCount,
requirement.pointerCount);
if (structNode.getDataWordCount() < requirement->dataWordCount ||
structNode.getPointerCount() < requirement->pointerCount) {
return rewriteStructNodeWithSizes(node, requirement->dataWordCount,
requirement->pointerCount);
}
}
}
@@ -1958,10 +1935,8 @@ void SchemaLoader::BrandedInitializerImpl::init(const _::RawBrandedSchema* schem
}
// Get the mutable version.
auto iter = lock->get()->brands.find(SchemaBindingsPair { schema->generic, schema->scopes });
KJ_ASSERT(iter != lock->get()->brands.end());
_::RawBrandedSchema* mutableSchema = iter->second;
_::RawBrandedSchema* mutableSchema = KJ_ASSERT_NONNULL(
lock->get()->brands.find(SchemaBindingsPair { schema->generic, schema->scopes }));
KJ_ASSERT(mutableSchema == schema);
// Construct its dependency map.
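KJ_ASSERT_NONNULL, used just above, unwraps a kj::Maybe and fails loudly if it is null, replacing the old find/assert/dereference sequence. A tiny hedged sketch:

```c++
#include <kj/map.h>
#include <kj/debug.h>

int& mustFind(kj::HashMap<uint, int>& map, uint key) {
  // Fails with an assertion naming the expression if `key` is absent.
  return KJ_ASSERT_NONNULL(map.find(key));
}
```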
@@ -26,6 +26,7 @@
#include <inttypes.h>
#include "time.h"
#include "function.h"
#include "hash.h"
namespace kj {
@@ -156,6 +157,9 @@ public:
bool operator>=(PathPtr other) const;
// Compare path components lexically.
uint hashCode() const;
// Can use in HashMap.
bool startsWith(PathPtr prefix) const;
bool endsWith(PathPtr suffix) const;
// Compare prefix / suffix.
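With hashCode() added here (defined below in terms of kj::hashCode(parts)), kj::Path and kj::PathPtr can be used directly as kj::HashMap keys, which is what the compiler's sourceDirectories map relies on. A brief hedged sketch:

```c++
#include <kj/filesystem.h>
#include <kj/map.h>

void rememberPath() {
  kj::HashMap<kj::Path, int> openCount;
  openCount.insert(kj::Path::parse("src/capnp/schema-loader.c++"), 1);

  auto key = kj::Path::parse("src/capnp/schema-loader.c++");
  KJ_IF_MAYBE(count, openCount.find(key)) {
    // Lookup works because Path now provides operator== and hashCode().
    ++*count;
  }
}
```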
@@ -264,6 +268,7 @@ public:
bool operator> (PathPtr other) const;
bool operator<=(PathPtr other) const;
bool operator>=(PathPtr other) const;
uint hashCode() const;
bool startsWith(PathPtr prefix) const;
bool endsWith(PathPtr suffix) const;
Path evalWin32(StringPtr pathText) const;
@@ -991,6 +996,7 @@ inline bool Path::operator< (PathPtr other) const { return PathPtr(*this) < oth
inline bool Path::operator> (PathPtr other) const { return PathPtr(*this) > other; }
inline bool Path::operator<=(PathPtr other) const { return PathPtr(*this) <= other; }
inline bool Path::operator>=(PathPtr other) const { return PathPtr(*this) >= other; }
inline uint Path::hashCode() const { return kj::hashCode(parts); }
inline bool Path::startsWith(PathPtr prefix) const { return PathPtr(*this).startsWith(prefix); }
inline bool Path::endsWith (PathPtr suffix) const { return PathPtr(*this).endsWith (suffix); }
inline String Path::toString(bool absolute) const { return PathPtr(*this).toString(absolute); }
@@ -1020,6 +1026,7 @@ inline bool PathPtr::operator!=(PathPtr other) const { return !(*this == other);
inline bool PathPtr::operator> (PathPtr other) const { return other < *this; }
inline bool PathPtr::operator<=(PathPtr other) const { return !(other < *this); }
inline bool PathPtr::operator>=(PathPtr other) const { return !(*this < other); }
inline uint PathPtr::hashCode() const { return kj::hashCode(parts); }
inline String PathPtr::toWin32String(bool absolute) const {
return toWin32StringImpl(absolute, false);
}