Commit b0868e34 authored by Kenton Varda

Replace some STL maps with KJ maps in Cap'n Proto implementation.

So far this is only a small subset of all the STL uses.
parent e443282b
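The pattern repeated throughout this commit: std::unordered_map's iterator-based find/end idiom becomes kj::HashMap::find(), which returns a kj::Maybe consumed with KJ_IF_MAYBE, and insert() takes the key and value directly; duplicate keys are not allowed with insert() (upsert() is used where merging is needed), which is why the KJ_ASSERT(… .second) wrappers around the old std inserts are dropped rather than ported. A minimal standalone sketch of the new idiom, using hypothetical names rather than code from this commit:

#include <kj/common.h>
#include <kj/map.h>
#include <kj/string.h>
#include <stdint.h>

// Hypothetical id -> name table, used only to illustrate the kj::HashMap API shape.
kj::HashMap<uint64_t, kj::String> names;

void add(uint64_t id, kj::StringPtr name) {
  // No std::make_pair; the key and value are passed directly.
  names.insert(id, kj::str(name));
}

kj::Maybe<kj::StringPtr> lookup(uint64_t id) {
  // find() returns kj::Maybe<kj::String&>; KJ_IF_MAYBE binds a pointer on success,
  // replacing the old iterator != end() check.
  KJ_IF_MAYBE(name, names.find(id)) {
    return kj::StringPtr(*name);
  }
  return nullptr;
}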
...@@ -98,11 +98,10 @@ SegmentReader* ReaderArena::tryGetSegment(SegmentId id) { ...@@ -98,11 +98,10 @@ SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
SegmentMap* segments = nullptr; SegmentMap* segments = nullptr;
KJ_IF_MAYBE(s, *lock) { KJ_IF_MAYBE(s, *lock) {
auto iter = s->get()->find(id.value); KJ_IF_MAYBE(segment, s->find(id.value)) {
if (iter != s->get()->end()) { return *segment;
return iter->second;
} }
segments = *s; segments = s;
} }
kj::ArrayPtr<const word> newSegment = message->getSegment(id.value); kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
...@@ -114,15 +113,13 @@ SegmentReader* ReaderArena::tryGetSegment(SegmentId id) { ...@@ -114,15 +113,13 @@ SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
if (*lock == nullptr) { if (*lock == nullptr) {
// OK, the segment exists, so allocate the map. // OK, the segment exists, so allocate the map.
auto s = kj::heap<SegmentMap>(); segments = &lock->emplace();
segments = s;
*lock = kj::mv(s);
} }
auto segment = kj::heap<SegmentReader>( auto segment = kj::heap<SegmentReader>(
this, id, newSegment.begin(), newSegmentSize, &readLimiter); this, id, newSegment.begin(), newSegmentSize, &readLimiter);
SegmentReader* result = segment; SegmentReader* result = segment;
segments->insert(std::make_pair(id.value, mv(segment))); segments->insert(id.value, kj::mv(segment));
return result; return result;
} }
......
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
#include "common.h" #include "common.h"
#include "message.h" #include "message.h"
#include "layout.h" #include "layout.h"
#include <unordered_map> #include <kj/map.h>
#if !CAPNP_LITE #if !CAPNP_LITE
#include "capability.h" #include "capability.h"
...@@ -241,8 +241,8 @@ private: ...@@ -241,8 +241,8 @@ private:
// Optimize for single-segment messages so that small messages are handled quickly. // Optimize for single-segment messages so that small messages are handled quickly.
SegmentReader segment0; SegmentReader segment0;
typedef std::unordered_map<uint, kj::Own<SegmentReader>> SegmentMap; typedef kj::HashMap<uint, kj::Own<SegmentReader>> SegmentMap;
kj::MutexGuarded<kj::Maybe<kj::Own<SegmentMap>>> moreSegments; kj::MutexGuarded<kj::Maybe<SegmentMap>> moreSegments;
// We need to mutex-guard the segment map because we lazily initialize segments when they are // We need to mutex-guard the segment map because we lazily initialize segments when they are
// first requested, but a Reader is allowed to be used concurrently in multiple threads. Luckily // first requested, but a Reader is allowed to be used concurrently in multiple threads. Luckily
// this only applies to large messages. // this only applies to large messages.
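The lazily-built segment table described above now lives directly in the kj::Maybe rather than behind a kj::Own, so the first caller constructs it in place with Maybe::emplace() while holding the lock, as tryGetSegment() does in the hunk further up. A simplified sketch of that shape, with stand-in types rather than the real ReaderArena code:

#include <kj/map.h>
#include <kj/memory.h>
#include <kj/mutex.h>

struct Segment { uint id; };                          // stand-in for SegmentReader
typedef kj::HashMap<uint, kj::Own<Segment>> SegmentTable;

kj::MutexGuarded<kj::Maybe<SegmentTable>> segments;

Segment* getOrAdd(uint id) {
  auto lock = segments.lockExclusive();

  SegmentTable* table = nullptr;
  KJ_IF_MAYBE(t, *lock) {
    KJ_IF_MAYBE(existing, t->find(id)) {
      return *existing;                               // kj::Own<Segment> converts to Segment*
    }
    table = t;
  }
  if (table == nullptr) {
    // First request: construct the map in place inside the Maybe, under the lock.
    table = &lock->emplace();
  }

  auto segment = kj::heap<Segment>();
  segment->id = id;
  Segment* result = segment;
  table->insert(id, kj::mv(segment));
  return result;
}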
......
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
#include <capnp/compat/json.h> #include <capnp/compat/json.h>
#include <errno.h> #include <errno.h>
#include <stdlib.h> #include <stdlib.h>
#include <kj/map.h>
#if _WIN32 #if _WIN32
#include <process.h> #include <process.h>
...@@ -1767,17 +1768,16 @@ private: ...@@ -1767,17 +1768,16 @@ private:
// require to function. // require to function.
struct SourceDirectory { struct SourceDirectory {
kj::Path path;
kj::Own<const kj::ReadableDirectory> dir; kj::Own<const kj::ReadableDirectory> dir;
bool isSourcePrefix; bool isSourcePrefix;
}; };
std::map<kj::PathPtr, SourceDirectory> sourceDirectories; kj::HashMap<kj::Path, SourceDirectory> sourceDirectories;
// For each import path and source prefix, tracks the directory object we opened for it. // For each import path and source prefix, tracks the directory object we opened for it.
// //
// Use via getSourceDirectory(). // Use via getSourceDirectory().
std::map<const kj::ReadableDirectory*, kj::String> dirPrefixes; kj::HashMap<const kj::ReadableDirectory*, kj::String> dirPrefixes;
// For each open directory object, maps to a path prefix to add when displaying this path in // For each open directory object, maps to a path prefix to add when displaying this path in
// error messages. This keeps track of the original directory name as given by the user, before // error messages. This keeps track of the original directory name as given by the user, before
// canonicalization. // canonicalization.
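With kj::HashMap the map owns its keys, so sourceDirectories is now keyed by kj::Path (moved in on insert) instead of a borrowed kj::PathPtr, while lookups can still pass a PathPtr view thanks to the hashCode() overloads added to kj/filesystem.h later in this diff. A small illustrative sketch with a hypothetical value type, not code from this commit:

#include <kj/filesystem.h>
#include <kj/map.h>
#include <kj/memory.h>

// Hypothetical entry mirroring the shape of SourceDirectory above.
struct DirEntry {
  kj::Own<const kj::ReadableDirectory> dir;
  bool isSourcePrefix;
};

kj::HashMap<kj::Path, DirEntry> dirs;

void add(kj::Path path, kj::Own<const kj::ReadableDirectory> dir, bool isSourcePrefix) {
  // The map owns the key: the Path is moved in, and the value is brace-initialized.
  dirs.insert(kj::mv(path), DirEntry { kj::mv(dir), isSourcePrefix });
}

kj::Maybe<const kj::ReadableDirectory&> lookup(kj::PathPtr path) {
  // Heterogeneous lookup: a PathPtr view can locate a Path key because both define
  // hashCode() and Path == PathPtr comparison exists.
  KJ_IF_MAYBE(entry, dirs.find(path)) {
    return *entry->dir;
  }
  return nullptr;
}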
...@@ -1827,10 +1827,9 @@ private: ...@@ -1827,10 +1827,9 @@ private:
if (path.size() == 0) return disk->getRoot(); if (path.size() == 0) return disk->getRoot();
auto iter = sourceDirectories.find(path); KJ_IF_MAYBE(sdir, sourceDirectories.find(path)) {
if (iter != sourceDirectories.end()) { sdir->isSourcePrefix = sdir->isSourcePrefix || isSourcePrefix;
iter->second.isSourcePrefix = iter->second.isSourcePrefix || isSourcePrefix; return *sdir->dir;
return *iter->second.dir;
} }
if (path == cwd) { if (path == cwd) {
...@@ -1843,26 +1842,22 @@ private: ...@@ -1843,26 +1842,22 @@ private:
// getDisplayName(). // getDisplayName().
auto& result = disk->getCurrent(); auto& result = disk->getCurrent();
if (isSourcePrefix) { if (isSourcePrefix) {
kj::PathPtr key = path;
kj::Own<const kj::ReadableDirectory> fakeOwn(&result, kj::NullDisposer::instance); kj::Own<const kj::ReadableDirectory> fakeOwn(&result, kj::NullDisposer::instance);
KJ_ASSERT(sourceDirectories.insert(std::make_pair(key, sourceDirectories.insert(kj::mv(path), { kj::mv(fakeOwn), isSourcePrefix });
SourceDirectory { kj::mv(path), kj::mv(fakeOwn), isSourcePrefix })).second);
} }
return result; return result;
} }
KJ_IF_MAYBE(dir, disk->getRoot().tryOpenSubdir(path)) { KJ_IF_MAYBE(dir, disk->getRoot().tryOpenSubdir(path)) {
auto& result = *dir->get(); auto& result = *dir->get();
kj::PathPtr key = path; sourceDirectories.insert(kj::mv(path), { kj::mv(*dir), isSourcePrefix });
KJ_ASSERT(sourceDirectories.insert(std::make_pair(key,
SourceDirectory { kj::mv(path), kj::mv(*dir), isSourcePrefix })).second);
#if _WIN32 #if _WIN32
kj::String prefix = pathStr.endsWith("/") || pathStr.endsWith("\\") kj::String prefix = pathStr.endsWith("/") || pathStr.endsWith("\\")
? kj::str(pathStr) : kj::str(pathStr, '\\'); ? kj::str(pathStr) : kj::str(pathStr, '\\');
#else #else
kj::String prefix = pathStr.endsWith("/") ? kj::str(pathStr) : kj::str(pathStr, '/'); kj::String prefix = pathStr.endsWith("/") ? kj::str(pathStr) : kj::str(pathStr, '/');
#endif #endif
KJ_ASSERT(dirPrefixes.insert(std::make_pair(&result, kj::mv(prefix))).second); dirPrefixes.insert(&result, kj::mv(prefix));
return result; return result;
} else { } else {
return nullptr; return nullptr;
...@@ -1883,9 +1878,8 @@ private: ...@@ -1883,9 +1878,8 @@ private:
auto prefix = path.slice(0, i); auto prefix = path.slice(0, i);
auto remainder = path.slice(i, path.size()); auto remainder = path.slice(i, path.size());
auto iter = sourceDirectories.find(prefix); KJ_IF_MAYBE(sdir, sourceDirectories.find(prefix)) {
if (iter != sourceDirectories.end() && iter->second.isSourcePrefix) { return { *sdir->dir, remainder.clone() };
return { *iter->second.dir, remainder.clone() };
} }
} }
...@@ -1914,9 +1908,8 @@ private: ...@@ -1914,9 +1908,8 @@ private:
} }
kj::String getDisplayName(const kj::ReadableDirectory& dir, kj::PathPtr path) { kj::String getDisplayName(const kj::ReadableDirectory& dir, kj::PathPtr path) {
auto iter = dirPrefixes.find(&dir); KJ_IF_MAYBE(prefix, dirPrefixes.find(&dir)) {
if (iter != dirPrefixes.end()) { return kj::str(*prefix, path.toNativeString());
return kj::str(iter->second, path.toNativeString());
} else if (&dir == &disk->getRoot()) { } else if (&dir == &disk->getRoot()) {
return path.toNativeString(true); return path.toNativeString(true);
} else if (&dir == &disk->getCurrent()) { } else if (&dir == &disk->getCurrent()) {
......
...@@ -126,9 +126,9 @@ private: ...@@ -126,9 +126,9 @@ private:
// Space in which we can construct a ReaderArena. We don't use ReaderArena directly here // Space in which we can construct a ReaderArena. We don't use ReaderArena directly here
// because we don't want clients to have to #include arena.h, which itself includes a bunch of // because we don't want clients to have to #include arena.h, which itself includes a bunch of
// big STL headers. We don't use a pointer to a ReaderArena because that would require an // other headers. We don't use a pointer to a ReaderArena because that would require an
// extra malloc on every message which could be expensive when processing small messages. // extra malloc on every message which could be expensive when processing small messages.
void* arenaSpace[15 + sizeof(kj::MutexGuarded<void*>) / sizeof(void*)]; void* arenaSpace[17 + sizeof(kj::MutexGuarded<void*>) / sizeof(void*)];
bool allocatedArena; bool allocatedArena;
_::ReaderArena* arena() { return reinterpret_cast<_::ReaderArena*>(arenaSpace); } _::ReaderArena* arena() { return reinterpret_cast<_::ReaderArena*>(arenaSpace); }
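The bump from 15 to 17 words presumably just makes room for the slightly larger kj-map-based ReaderArena; the surrounding trick is ordinary placement storage: reserve raw, suitably aligned space inside the object and construct the implementation into it, avoiding a per-message heap allocation. A generic sketch of that pattern with made-up types, not MessageReader's actual code:

#include <new>      // placement new

// Made-up implementation type we want to embed without exposing its header.
struct Impl {
  long a, b, c;
  Impl(): a(0), b(0), c(0) {}
};

class Wrapper {
public:
  Wrapper() { new (space) Impl(); }                   // construct in place
  ~Wrapper() { impl()->~Impl(); }                     // destroy explicitly

  Impl* impl() { return reinterpret_cast<Impl*>(space); }

private:
  void* space[4];   // pointer-aligned raw storage; must remain >= sizeof(Impl)
};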
......
...@@ -21,9 +21,6 @@ ...@@ -21,9 +21,6 @@
#define CAPNP_PRIVATE #define CAPNP_PRIVATE
#include "schema-loader.h" #include "schema-loader.h"
#include <unordered_map>
#include <unordered_set>
#include <map>
#include "message.h" #include "message.h"
#include "arena.h" #include "arena.h"
#include <kj/debug.h> #include <kj/debug.h>
...@@ -31,6 +28,7 @@ ...@@ -31,6 +28,7 @@
#include <kj/arena.h> #include <kj/arena.h>
#include <kj/vector.h> #include <kj/vector.h>
#include <algorithm> #include <algorithm>
#include <kj/map.h>
#if _MSC_VER #if _MSC_VER
#include <atomic> #include <atomic>
...@@ -40,27 +38,6 @@ namespace capnp { ...@@ -40,27 +38,6 @@ namespace capnp {
namespace { namespace {
struct ByteArrayHash {
size_t operator()(kj::ArrayPtr<const byte> bytes) const {
// FNV hash. Probably sucks, but the code is simple.
//
// TODO(perf): Add CityHash or something to KJ and use it here.
uint64_t hash = 0xcbf29ce484222325ull;
for (byte b: bytes) {
hash = hash * 0x100000001b3ull;
hash ^= b;
}
return hash;
}
};
struct ByteArrayEq {
bool operator()(kj::ArrayPtr<const byte> a, kj::ArrayPtr<const byte> b) const {
return a.size() == b.size() && memcmp(a.begin(), b.begin(), a.size()) == 0;
}
};
struct SchemaBindingsPair { struct SchemaBindingsPair {
const _::RawSchema* schema; const _::RawSchema* schema;
const _::RawBrandedSchema::Scope* scopeBindings; const _::RawBrandedSchema::Scope* scopeBindings;
...@@ -68,12 +45,8 @@ struct SchemaBindingsPair { ...@@ -68,12 +45,8 @@ struct SchemaBindingsPair {
inline bool operator==(const SchemaBindingsPair& other) const { inline bool operator==(const SchemaBindingsPair& other) const {
return schema == other.schema && scopeBindings == other.scopeBindings; return schema == other.schema && scopeBindings == other.scopeBindings;
} }
}; inline uint hashCode() const {
return kj::hashCode(schema, scopeBindings);
struct SchemaBindingsPairHash {
size_t operator()(SchemaBindingsPair pair) const {
return 31 * reinterpret_cast<uintptr_t>(pair.schema) +
reinterpret_cast<uintptr_t>(pair.scopeBindings);
} }
}; };
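With kj::HashMap there is no separate hash-functor template parameter: any key type that defines operator== and a hashCode() method works, and kj::hashCode(a, b, ...) hashes multiple fields and mixes the results. A standalone sketch of the same idea with a hypothetical two-pointer key:

#include <kj/hash.h>
#include <kj/map.h>

struct Info { int x; };   // arbitrary pointee type, for illustration only

// Hypothetical key analogous to SchemaBindingsPair above: the identity of two objects.
struct PairKey {
  const Info* a;
  const Info* b;

  inline bool operator==(const PairKey& other) const {
    return a == other.a && b == other.b;
  }
  inline uint hashCode() const {
    return kj::hashCode(a, b);   // hashes the two pointer values and mixes them
  }
};

kj::HashMap<PairKey, int> table;

kj::Maybe<int> lookup(const Info* a, const Info* b) {
  KJ_IF_MAYBE(value, table.find(PairKey { a, b })) {
    return *value;
  }
  return nullptr;
}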
...@@ -150,19 +123,19 @@ public: ...@@ -150,19 +123,19 @@ public:
kj::Arena arena; kj::Arena arena;
private: private:
std::unordered_set<kj::ArrayPtr<const byte>, ByteArrayHash, ByteArrayEq> dedupTable; kj::HashSet<kj::ArrayPtr<const byte>> dedupTable;
// Records raw segments of memory in the arena against which we my want to de-dupe later // Records raw segments of memory in the arena against which we my want to de-dupe later
// additions. Specifically, RawBrandedSchema binding tables are de-duped. // additions. Specifically, RawBrandedSchema binding tables are de-duped.
std::unordered_map<uint64_t, _::RawSchema*> schemas; kj::HashMap<uint64_t, _::RawSchema*> schemas;
std::unordered_map<SchemaBindingsPair, _::RawBrandedSchema*, SchemaBindingsPairHash> brands; kj::HashMap<SchemaBindingsPair, _::RawBrandedSchema*> brands;
std::unordered_map<const _::RawSchema*, _::RawBrandedSchema*> unboundBrands; kj::HashMap<const _::RawSchema*, _::RawBrandedSchema*> unboundBrands;
struct RequiredSize { struct RequiredSize {
uint16_t dataWordCount; uint16_t dataWordCount;
uint16_t pointerCount; uint16_t pointerCount;
}; };
std::unordered_map<uint64_t, RequiredSize> structSizeRequirements; kj::HashMap<uint64_t, RequiredSize> structSizeRequirements;
InitializerImpl initializer; InitializerImpl initializer;
BrandedInitializerImpl brandedInitializer; BrandedInitializerImpl brandedInitializer;
...@@ -285,7 +258,7 @@ public: ...@@ -285,7 +258,7 @@ public:
loader.arena.allocateArray<const _::RawSchema*>(*count); loader.arena.allocateArray<const _::RawSchema*>(*count);
uint pos = 0; uint pos = 0;
for (auto& dep: dependencies) { for (auto& dep: dependencies) {
result[pos++] = dep.second; result[pos++] = dep.value;
} }
KJ_DASSERT(pos == *count); KJ_DASSERT(pos == *count);
return result.begin(); return result.begin();
...@@ -296,7 +269,7 @@ public: ...@@ -296,7 +269,7 @@ public:
kj::ArrayPtr<uint16_t> result = loader.arena.allocateArray<uint16_t>(*count); kj::ArrayPtr<uint16_t> result = loader.arena.allocateArray<uint16_t>(*count);
uint pos = 0; uint pos = 0;
for (auto& member: members) { for (auto& member: members) {
result[pos++] = member.second; result[pos++] = member.value;
} }
KJ_DASSERT(pos == *count); KJ_DASSERT(pos == *count);
return result.begin(); return result.begin();
...@@ -310,10 +283,14 @@ private: ...@@ -310,10 +283,14 @@ private:
SchemaLoader::Impl& loader; SchemaLoader::Impl& loader;
Text::Reader nodeName; Text::Reader nodeName;
bool isValid; bool isValid;
std::map<uint64_t, _::RawSchema*> dependencies;
// Maps type IDs -> compiled schemas for each dependency.
// Order is important because makeDependencyArray() compiles a sorted array.
kj::TreeMap<uint64_t, _::RawSchema*> dependencies;
// Maps name -> index for each member. // Maps name -> index for each member.
std::map<Text::Reader, uint> members; // Order is important because makeMemberInfoArray() compiles a sorted array.
kj::TreeMap<Text::Reader, uint> members;
kj::ArrayPtr<uint16_t> membersByDiscriminant; kj::ArrayPtr<uint16_t> membersByDiscriminant;
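The std::map members become kj::TreeMap rather than kj::HashMap on purpose, as the new comments explain: kj::TreeMap iterates in ascending key order, which makeDependencyArray() and makeMemberInfoArray() rely on when they emit sorted arrays. A tiny sketch of that property, illustrative only:

#include <kj/debug.h>
#include <kj/map.h>
#include <stdint.h>

void demo() {
  kj::TreeMap<uint64_t, int> deps;
  deps.insert(30, 2);
  deps.insert(10, 0);
  deps.insert(20, 1);

  // Unlike kj::HashMap, iteration visits keys in sorted order: 10, 20, 30.
  for (auto& entry: deps) {
    KJ_LOG(INFO, entry.key, entry.value);
  }
}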
...@@ -323,8 +300,9 @@ private: ...@@ -323,8 +300,9 @@ private:
KJ_FAIL_REQUIRE(__VA_ARGS__) { isValid = false; return; } KJ_FAIL_REQUIRE(__VA_ARGS__) { isValid = false; return; }
void validateMemberName(kj::StringPtr name, uint index) { void validateMemberName(kj::StringPtr name, uint index) {
bool isNewName = members.insert(std::make_pair(name, index)).second; members.upsert(name, index, [&](auto&, auto&&) {
VALIDATE_SCHEMA(isNewName, "duplicate name", name); FAIL_VALIDATE_SCHEMA("duplicate name", name);
});
} }
void validate(const schema::Node::Struct::Reader& structNode, uint64_t scopeId) { void validate(const schema::Node::Struct::Reader& structNode, uint64_t scopeId) {
...@@ -625,12 +603,13 @@ private: ...@@ -625,12 +603,13 @@ private:
VALIDATE_SCHEMA(node.which() == expectedKind, VALIDATE_SCHEMA(node.which() == expectedKind,
"expected a different kind of node for this ID", "expected a different kind of node for this ID",
id, (uint)expectedKind, (uint)node.which(), node.getDisplayName()); id, (uint)expectedKind, (uint)node.which(), node.getDisplayName());
dependencies.insert(std::make_pair(id, existing)); dependencies.upsert(id, existing, [](auto&,auto&&) { /* ignore dupe */ });
return; return;
} }
dependencies.insert(std::make_pair(id, loader.loadEmpty( dependencies.upsert(id, loader.loadEmpty(
id, kj::str("(unknown type used by ", nodeName , ")"), expectedKind, true))); id, kj::str("(unknown type used by ", nodeName , ")"), expectedKind, true),
[](auto&,auto&&) { /* ignore dupe */ });
} }
#undef VALIDATE_SCHEMA #undef VALIDATE_SCHEMA
...@@ -1263,49 +1242,52 @@ _::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool ...@@ -1263,49 +1242,52 @@ _::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool
} }
// Check if we already have a schema for this ID. // Check if we already have a schema for this ID.
_::RawSchema*& slot = schemas[validatedReader.getId()]; _::RawSchema* schema;
bool shouldReplace; bool shouldReplace;
bool shouldClearInitializer; bool shouldClearInitializer;
if (slot == nullptr) { KJ_IF_MAYBE(match, schemas.find(validatedReader.getId())) {
// Nope, allocate a new RawSchema.
slot = &arena.allocate<_::RawSchema>();
memset(&slot->defaultBrand, 0, sizeof(slot->defaultBrand));
slot->id = validatedReader.getId();
slot->canCastTo = nullptr;
slot->defaultBrand.generic = slot;
slot->lazyInitializer = isPlaceholder ? &initializer : nullptr;
slot->defaultBrand.lazyInitializer = isPlaceholder ? &brandedInitializer : nullptr;
shouldReplace = true;
shouldClearInitializer = false;
} else {
// Yes, check if it is compatible and figure out which schema is newer. // Yes, check if it is compatible and figure out which schema is newer.
// If the existing slot is a placeholder, but we're upgrading it to a non-placeholder, we schema = *match;
// If the existing schema is a placeholder, but we're upgrading it to a non-placeholder, we
// need to clear the initializer later. // need to clear the initializer later.
shouldClearInitializer = slot->lazyInitializer != nullptr && !isPlaceholder; shouldClearInitializer = schema->lazyInitializer != nullptr && !isPlaceholder;
auto existing = readMessageUnchecked<schema::Node>(slot->encodedNode); auto existing = readMessageUnchecked<schema::Node>(schema->encodedNode);
CompatibilityChecker checker(*this); CompatibilityChecker checker(*this);
// Prefer to replace the existing schema if the existing schema is a placeholder. Otherwise, // Prefer to replace the existing schema if the existing schema is a placeholder. Otherwise,
// prefer to keep the existing schema. // prefer to keep the existing schema.
shouldReplace = checker.shouldReplace( shouldReplace = checker.shouldReplace(
existing, validatedReader, slot->lazyInitializer != nullptr); existing, validatedReader, schema->lazyInitializer != nullptr);
} else {
// Nope, allocate a new RawSchema.
schema = &arena.allocate<_::RawSchema>();
memset(&schema->defaultBrand, 0, sizeof(schema->defaultBrand));
schema->id = validatedReader.getId();
schema->canCastTo = nullptr;
schema->defaultBrand.generic = schema;
schema->lazyInitializer = isPlaceholder ? &initializer : nullptr;
schema->defaultBrand.lazyInitializer = isPlaceholder ? &brandedInitializer : nullptr;
shouldReplace = true;
shouldClearInitializer = false;
schemas.insert(validatedReader.getId(), schema);
} }
if (shouldReplace) { if (shouldReplace) {
// Initialize the RawSchema. // Initialize the RawSchema.
slot->encodedNode = validated.begin(); schema->encodedNode = validated.begin();
slot->encodedSize = validated.size(); schema->encodedSize = validated.size();
slot->dependencies = validator.makeDependencyArray(&slot->dependencyCount); schema->dependencies = validator.makeDependencyArray(&schema->dependencyCount);
slot->membersByName = validator.makeMemberInfoArray(&slot->memberCount); schema->membersByName = validator.makeMemberInfoArray(&schema->memberCount);
slot->membersByDiscriminant = validator.makeMembersByDiscriminantArray(); schema->membersByDiscriminant = validator.makeMembersByDiscriminantArray();
// Even though this schema isn't itself branded, it may have dependencies that are. So, we // Even though this schema isn't itself branded, it may have dependencies that are. So, we
// need to set up the "dependencies" map under defaultBrand. // need to set up the "dependencies" map under defaultBrand.
auto deps = makeBrandedDependencies(slot, kj::ArrayPtr<const _::RawBrandedSchema::Scope>()); auto deps = makeBrandedDependencies(schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope>());
slot->defaultBrand.dependencies = deps.begin(); schema->defaultBrand.dependencies = deps.begin();
slot->defaultBrand.dependencyCount = deps.size(); schema->defaultBrand.dependencyCount = deps.size();
} }
if (shouldClearInitializer) { if (shouldClearInitializer) {
...@@ -1313,94 +1295,91 @@ _::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool ...@@ -1313,94 +1295,91 @@ _::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool
// dependency list of other schemas. Once the initializer is null, it is live, so we must do // dependency list of other schemas. Once the initializer is null, it is live, so we must do
// a release-store here. // a release-store here.
#if __GNUC__ #if __GNUC__
__atomic_store_n(&slot->lazyInitializer, nullptr, __ATOMIC_RELEASE); __atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE);
__atomic_store_n(&slot->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE); __atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
#elif _MSC_VER #elif _MSC_VER
std::atomic_thread_fence(std::memory_order_release); std::atomic_thread_fence(std::memory_order_release);
*static_cast<_::RawSchema::Initializer const* volatile*>(&slot->lazyInitializer) = nullptr; *static_cast<_::RawSchema::Initializer const* volatile*>(&schema->lazyInitializer) = nullptr;
*static_cast<_::RawBrandedSchema::Initializer const* volatile*>( *static_cast<_::RawBrandedSchema::Initializer const* volatile*>(
&slot->defaultBrand.lazyInitializer) = nullptr; &schema->defaultBrand.lazyInitializer) = nullptr;
#else #else
#error "Platform not supported" #error "Platform not supported"
#endif #endif
} }
return slot; return schema;
} }
_::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) { _::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) {
_::RawSchema*& slot = schemas[nativeSchema->id]; _::RawSchema* schema;
bool shouldReplace; bool shouldReplace;
bool shouldClearInitializer; bool shouldClearInitializer;
if (slot == nullptr) { KJ_IF_MAYBE(match, schemas.find(nativeSchema->id)) {
slot = &arena.allocate<_::RawSchema>(); schema = *match;
memset(&slot->defaultBrand, 0, sizeof(slot->defaultBrand)); if (schema->canCastTo != nullptr) {
slot->defaultBrand.generic = slot; // Already loaded natively, or we're currently in the process of loading natively and there
slot->lazyInitializer = nullptr; // was a dependency cycle.
slot->defaultBrand.lazyInitializer = nullptr; KJ_REQUIRE(schema->canCastTo == nativeSchema,
"two different compiled-in type have the same type ID",
nativeSchema->id,
readMessageUnchecked<schema::Node>(nativeSchema->encodedNode).getDisplayName(),
readMessageUnchecked<schema::Node>(schema->canCastTo->encodedNode).getDisplayName());
return schema;
} else {
auto existing = readMessageUnchecked<schema::Node>(schema->encodedNode);
auto native = readMessageUnchecked<schema::Node>(nativeSchema->encodedNode);
CompatibilityChecker checker(*this);
shouldReplace = checker.shouldReplace(existing, native, true);
shouldClearInitializer = schema->lazyInitializer != nullptr;
}
} else {
schema = &arena.allocate<_::RawSchema>();
memset(&schema->defaultBrand, 0, sizeof(schema->defaultBrand));
schema->defaultBrand.generic = schema;
schema->lazyInitializer = nullptr;
schema->defaultBrand.lazyInitializer = nullptr;
shouldReplace = true; shouldReplace = true;
shouldClearInitializer = false; // already cleared above shouldClearInitializer = false; // already cleared above
} else if (slot->canCastTo != nullptr) { schemas.insert(nativeSchema->id, schema);
// Already loaded natively, or we're currently in the process of loading natively and there
// was a dependency cycle.
KJ_REQUIRE(slot->canCastTo == nativeSchema,
"two different compiled-in type have the same type ID",
nativeSchema->id,
readMessageUnchecked<schema::Node>(nativeSchema->encodedNode).getDisplayName(),
readMessageUnchecked<schema::Node>(slot->canCastTo->encodedNode).getDisplayName());
return slot;
} else {
auto existing = readMessageUnchecked<schema::Node>(slot->encodedNode);
auto native = readMessageUnchecked<schema::Node>(nativeSchema->encodedNode);
CompatibilityChecker checker(*this);
shouldReplace = checker.shouldReplace(existing, native, true);
shouldClearInitializer = slot->lazyInitializer != nullptr;
} }
// Since we recurse below, the slot in the hash map could move around. Copy out the pointer
// for subsequent use.
// TODO(cleanup): Above comment is actually not true of unordered_map. Leaving here to explain
// code pattern below.
_::RawSchema* result = slot;
if (shouldReplace) { if (shouldReplace) {
// Set the schema to a copy of the native schema, but make sure not to null out lazyInitializer // Set the schema to a copy of the native schema, but make sure not to null out lazyInitializer
// yet. // yet.
_::RawSchema temp = *nativeSchema; _::RawSchema temp = *nativeSchema;
temp.lazyInitializer = result->lazyInitializer; temp.lazyInitializer = schema->lazyInitializer;
*result = temp; *schema = temp;
result->defaultBrand.generic = result; schema->defaultBrand.generic = schema;
// Indicate that casting is safe. Note that it's important to set this before recursively // Indicate that casting is safe. Note that it's important to set this before recursively
// loading dependencies, so that cycles don't cause infinite loops! // loading dependencies, so that cycles don't cause infinite loops!
result->canCastTo = nativeSchema; schema->canCastTo = nativeSchema;
// We need to set the dependency list to point at other loader-owned RawSchemas. // We need to set the dependency list to point at other loader-owned RawSchemas.
kj::ArrayPtr<const _::RawSchema*> dependencies = kj::ArrayPtr<const _::RawSchema*> dependencies =
arena.allocateArray<const _::RawSchema*>(result->dependencyCount); arena.allocateArray<const _::RawSchema*>(schema->dependencyCount);
for (uint i = 0; i < nativeSchema->dependencyCount; i++) { for (uint i = 0; i < nativeSchema->dependencyCount; i++) {
dependencies[i] = loadNative(nativeSchema->dependencies[i]); dependencies[i] = loadNative(nativeSchema->dependencies[i]);
} }
result->dependencies = dependencies.begin(); schema->dependencies = dependencies.begin();
// Also need to re-do the branded dependencies. // Also need to re-do the branded dependencies.
auto deps = makeBrandedDependencies(slot, kj::ArrayPtr<const _::RawBrandedSchema::Scope>()); auto deps = makeBrandedDependencies(schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope>());
slot->defaultBrand.dependencies = deps.begin(); schema->defaultBrand.dependencies = deps.begin();
slot->defaultBrand.dependencyCount = deps.size(); schema->defaultBrand.dependencyCount = deps.size();
// If there is a struct size requirement, we need to make sure that it is satisfied. // If there is a struct size requirement, we need to make sure that it is satisfied.
auto reqIter = structSizeRequirements.find(nativeSchema->id); KJ_IF_MAYBE(sizeReq, structSizeRequirements.find(nativeSchema->id)) {
if (reqIter != structSizeRequirements.end()) { applyStructSizeRequirement(schema, sizeReq->dataWordCount,
applyStructSizeRequirement(result, reqIter->second.dataWordCount, sizeReq->pointerCount);
reqIter->second.pointerCount);
} }
} else { } else {
// The existing schema is newer. // The existing schema is newer.
// Indicate that casting is safe. Note that it's important to set this before recursively // Indicate that casting is safe. Note that it's important to set this before recursively
// loading dependencies, so that cycles don't cause infinite loops! // loading dependencies, so that cycles don't cause infinite loops!
result->canCastTo = nativeSchema; schema->canCastTo = nativeSchema;
// Make sure the dependencies are loaded and compatible. // Make sure the dependencies are loaded and compatible.
for (uint i = 0; i < nativeSchema->dependencyCount; i++) { for (uint i = 0; i < nativeSchema->dependencyCount; i++) {
...@@ -1413,19 +1392,19 @@ _::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) { ...@@ -1413,19 +1392,19 @@ _::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) {
// dependency list of other schemas. Once the initializer is null, it is live, so we must do // dependency list of other schemas. Once the initializer is null, it is live, so we must do
// a release-store here. // a release-store here.
#if __GNUC__ #if __GNUC__
__atomic_store_n(&result->lazyInitializer, nullptr, __ATOMIC_RELEASE); __atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE);
__atomic_store_n(&result->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE); __atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
#elif _MSC_VER #elif _MSC_VER
std::atomic_thread_fence(std::memory_order_release); std::atomic_thread_fence(std::memory_order_release);
*static_cast<_::RawSchema::Initializer const* volatile*>(&result->lazyInitializer) = nullptr; *static_cast<_::RawSchema::Initializer const* volatile*>(&schema->lazyInitializer) = nullptr;
*static_cast<_::RawBrandedSchema::Initializer const* volatile*>( *static_cast<_::RawBrandedSchema::Initializer const* volatile*>(
&result->defaultBrand.lazyInitializer) = nullptr; &schema->defaultBrand.lazyInitializer) = nullptr;
#else #else
#error "Platform not supported" #error "Platform not supported"
#endif #endif
} }
return result; return schema;
} }
_::RawSchema* SchemaLoader::Impl::loadEmpty( _::RawSchema* SchemaLoader::Impl::loadEmpty(
...@@ -1539,20 +1518,20 @@ const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded( ...@@ -1539,20 +1518,20 @@ const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded(
return &schema->defaultBrand; return &schema->defaultBrand;
} }
auto& slot = brands[SchemaBindingsPair { schema, bindings.begin() }]; SchemaBindingsPair key { schema, bindings.begin() };
KJ_IF_MAYBE(existing, brands.find(key)) {
if (slot == nullptr) { return *existing;
} else {
auto& brand = arena.allocate<_::RawBrandedSchema>(); auto& brand = arena.allocate<_::RawBrandedSchema>();
memset(&brand, 0, sizeof(brand)); memset(&brand, 0, sizeof(brand));
slot = &brand; brands.insert(key, &brand);
brand.generic = schema; brand.generic = schema;
brand.scopes = bindings.begin(); brand.scopes = bindings.begin();
brand.scopeCount = bindings.size(); brand.scopeCount = bindings.size();
brand.lazyInitializer = &brandedInitializer; brand.lazyInitializer = &brandedInitializer;
return &brand;
} }
return slot;
} }
kj::ArrayPtr<const _::RawBrandedSchema::Dependency> kj::ArrayPtr<const _::RawBrandedSchema::Dependency>
...@@ -1783,16 +1762,15 @@ kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<const T> valu ...@@ -1783,16 +1762,15 @@ kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<const T> valu
auto bytes = values.asBytes(); auto bytes = values.asBytes();
auto iter = dedupTable.find(bytes); KJ_IF_MAYBE(dupe, dedupTable.find(bytes)) {
if (iter != dedupTable.end()) { return kj::arrayPtr(reinterpret_cast<const T*>(dupe->begin()), values.size());
return kj::arrayPtr(reinterpret_cast<const T*>(iter->begin()), values.size());
} }
// Need to make a new copy. // Need to make a new copy.
auto copy = arena.allocateArray<T>(values.size()); auto copy = arena.allocateArray<T>(values.size());
memcpy(copy.begin(), values.begin(), values.size() * sizeof(T)); memcpy(copy.begin(), values.begin(), values.size() * sizeof(T));
KJ_ASSERT(dedupTable.insert(copy.asBytes()).second); dedupTable.insert(copy.asBytes());
return copy; return copy;
} }
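kj::HashSet needs no custom hasher or equality functor here because kj/hash.h hashes an ArrayPtr<const byte> by content and ArrayPtr equality compares contents, which is what allowed ByteArrayHash and ByteArrayEq to be deleted above. A minimal sketch of content-based de-duplication with a hypothetical helper, not the SchemaLoader code:

#include <kj/common.h>
#include <kj/map.h>

class Deduper {
public:
  // Returns true the first time a given byte sequence is seen, false thereafter.
  bool isNew(kj::ArrayPtr<const kj::byte> bytes) {
    if (seen.find(bytes) != nullptr) {
      return false;                 // identical contents already recorded
    }
    seen.insert(bytes);             // stores the view, not a copy
    return true;
  }

private:
  kj::HashSet<kj::ArrayPtr<const kj::byte>> seen;
  // Like dedupTable above, this stores non-owning views: the caller must keep the
  // underlying bytes alive (SchemaLoader keeps them in its kj::Arena).
};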
...@@ -1803,11 +1781,10 @@ kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<T> values) { ...@@ -1803,11 +1781,10 @@ kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<T> values) {
} }
SchemaLoader::Impl::TryGetResult SchemaLoader::Impl::tryGet(uint64_t typeId) const { SchemaLoader::Impl::TryGetResult SchemaLoader::Impl::tryGet(uint64_t typeId) const {
auto iter = schemas.find(typeId); KJ_IF_MAYBE(schema, schemas.find(typeId)) {
if (iter == schemas.end()) { return {*schema, initializer.getCallback()};
return {nullptr, initializer.getCallback()};
} else { } else {
return {iter->second, initializer.getCallback()}; return {nullptr, initializer.getCallback()};
} }
} }
...@@ -1817,43 +1794,45 @@ const _::RawBrandedSchema* SchemaLoader::Impl::getUnbound(const _::RawSchema* sc ...@@ -1817,43 +1794,45 @@ const _::RawBrandedSchema* SchemaLoader::Impl::getUnbound(const _::RawSchema* sc
return &schema->defaultBrand; return &schema->defaultBrand;
} }
auto& slot = unboundBrands[schema]; KJ_IF_MAYBE(existing, unboundBrands.find(schema)) {
if (slot == nullptr) { return *existing;
slot = &arena.allocate<_::RawBrandedSchema>(); } else {
auto slot = &arena.allocate<_::RawBrandedSchema>();
memset(slot, 0, sizeof(*slot)); memset(slot, 0, sizeof(*slot));
slot->generic = schema; slot->generic = schema;
auto deps = makeBrandedDependencies(schema, nullptr); auto deps = makeBrandedDependencies(schema, nullptr);
slot->dependencies = deps.begin(); slot->dependencies = deps.begin();
slot->dependencyCount = deps.size(); slot->dependencyCount = deps.size();
unboundBrands.insert(schema, slot);
return slot;
} }
return slot;
} }
kj::Array<Schema> SchemaLoader::Impl::getAllLoaded() const { kj::Array<Schema> SchemaLoader::Impl::getAllLoaded() const {
size_t count = 0; size_t count = 0;
for (auto& schema: schemas) { for (auto& schema: schemas) {
if (schema.second->lazyInitializer == nullptr) ++count; if (schema.value->lazyInitializer == nullptr) ++count;
} }
kj::Array<Schema> result = kj::heapArray<Schema>(count); kj::Array<Schema> result = kj::heapArray<Schema>(count);
size_t i = 0; size_t i = 0;
for (auto& schema: schemas) { for (auto& schema: schemas) {
if (schema.second->lazyInitializer == nullptr) { if (schema.value->lazyInitializer == nullptr) {
result[i++] = Schema(&schema.second->defaultBrand); result[i++] = Schema(&schema.value->defaultBrand);
} }
} }
return result; return result;
} }
void SchemaLoader::Impl::requireStructSize(uint64_t id, uint dataWordCount, uint pointerCount) { void SchemaLoader::Impl::requireStructSize(uint64_t id, uint dataWordCount, uint pointerCount) {
auto& slot = structSizeRequirements[id]; structSizeRequirements.upsert(id, { uint16_t(dataWordCount), uint16_t(pointerCount) },
slot.dataWordCount = kj::max(slot.dataWordCount, dataWordCount); [&](RequiredSize& existingValue, RequiredSize&& newValue) {
slot.pointerCount = kj::max(slot.pointerCount, pointerCount); existingValue.dataWordCount = kj::max(existingValue.dataWordCount, newValue.dataWordCount);
existingValue.pointerCount = kj::max(existingValue.pointerCount, newValue.pointerCount);
});
auto iter = schemas.find(id); KJ_IF_MAYBE(schema, schemas.find(id)) {
if (iter != schemas.end()) { applyStructSizeRequirement(*schema, dataWordCount, pointerCount);
applyStructSizeRequirement(iter->second, dataWordCount, pointerCount);
} }
} }
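upsert() replaces the old operator[]-then-merge idiom: if the key is absent the given value is inserted, and otherwise the callback receives the existing value plus the new one and merges them, which is how requireStructSize() keeps per-field maxima above. A compact standalone sketch of the same shape:

#include <kj/common.h>
#include <kj/map.h>
#include <stdint.h>

struct Limits {
  uint16_t words;
  uint16_t pointers;
};

kj::HashMap<uint64_t, Limits> requirements;

void require(uint64_t id, uint16_t words, uint16_t pointers) {
  // Insert when absent; otherwise merge by keeping the per-field maximum.
  requirements.upsert(id, Limits { words, pointers },
      [](Limits& existing, Limits&& newValue) {
    existing.words = kj::max(existing.words, newValue.words);
    existing.pointers = kj::max(existing.pointers, newValue.pointers);
  });
}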
...@@ -1868,14 +1847,12 @@ kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNode(schema::Node::Reader no ...@@ -1868,14 +1847,12 @@ kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNode(schema::Node::Reader no
kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNodeEnforcingSizeRequirements( kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNodeEnforcingSizeRequirements(
schema::Node::Reader node) { schema::Node::Reader node) {
if (node.isStruct()) { if (node.isStruct()) {
auto iter = structSizeRequirements.find(node.getId()); KJ_IF_MAYBE(requirement, structSizeRequirements.find(node.getId())) {
if (iter != structSizeRequirements.end()) {
auto requirement = iter->second;
auto structNode = node.getStruct(); auto structNode = node.getStruct();
if (structNode.getDataWordCount() < requirement.dataWordCount || if (structNode.getDataWordCount() < requirement->dataWordCount ||
structNode.getPointerCount() < requirement.pointerCount) { structNode.getPointerCount() < requirement->pointerCount) {
return rewriteStructNodeWithSizes(node, requirement.dataWordCount, return rewriteStructNodeWithSizes(node, requirement->dataWordCount,
requirement.pointerCount); requirement->pointerCount);
} }
} }
} }
...@@ -1958,10 +1935,8 @@ void SchemaLoader::BrandedInitializerImpl::init(const _::RawBrandedSchema* schem ...@@ -1958,10 +1935,8 @@ void SchemaLoader::BrandedInitializerImpl::init(const _::RawBrandedSchema* schem
} }
// Get the mutable version. // Get the mutable version.
auto iter = lock->get()->brands.find(SchemaBindingsPair { schema->generic, schema->scopes }); _::RawBrandedSchema* mutableSchema = KJ_ASSERT_NONNULL(
KJ_ASSERT(iter != lock->get()->brands.end()); lock->get()->brands.find(SchemaBindingsPair { schema->generic, schema->scopes }));
_::RawBrandedSchema* mutableSchema = iter->second;
KJ_ASSERT(mutableSchema == schema); KJ_ASSERT(mutableSchema == schema);
// Construct its dependency map. // Construct its dependency map.
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <inttypes.h> #include <inttypes.h>
#include "time.h" #include "time.h"
#include "function.h" #include "function.h"
#include "hash.h"
namespace kj { namespace kj {
...@@ -156,6 +157,9 @@ public: ...@@ -156,6 +157,9 @@ public:
bool operator>=(PathPtr other) const; bool operator>=(PathPtr other) const;
// Compare path components lexically. // Compare path components lexically.
uint hashCode() const;
// Can use in HashMap.
bool startsWith(PathPtr prefix) const; bool startsWith(PathPtr prefix) const;
bool endsWith(PathPtr suffix) const; bool endsWith(PathPtr suffix) const;
// Compare prefix / suffix. // Compare prefix / suffix.
...@@ -264,6 +268,7 @@ public: ...@@ -264,6 +268,7 @@ public:
bool operator> (PathPtr other) const; bool operator> (PathPtr other) const;
bool operator<=(PathPtr other) const; bool operator<=(PathPtr other) const;
bool operator>=(PathPtr other) const; bool operator>=(PathPtr other) const;
uint hashCode() const;
bool startsWith(PathPtr prefix) const; bool startsWith(PathPtr prefix) const;
bool endsWith(PathPtr suffix) const; bool endsWith(PathPtr suffix) const;
Path evalWin32(StringPtr pathText) const; Path evalWin32(StringPtr pathText) const;
...@@ -991,6 +996,7 @@ inline bool Path::operator< (PathPtr other) const { return PathPtr(*this) < oth ...@@ -991,6 +996,7 @@ inline bool Path::operator< (PathPtr other) const { return PathPtr(*this) < oth
inline bool Path::operator> (PathPtr other) const { return PathPtr(*this) > other; } inline bool Path::operator> (PathPtr other) const { return PathPtr(*this) > other; }
inline bool Path::operator<=(PathPtr other) const { return PathPtr(*this) <= other; } inline bool Path::operator<=(PathPtr other) const { return PathPtr(*this) <= other; }
inline bool Path::operator>=(PathPtr other) const { return PathPtr(*this) >= other; } inline bool Path::operator>=(PathPtr other) const { return PathPtr(*this) >= other; }
inline uint Path::hashCode() const { return kj::hashCode(parts); }
inline bool Path::startsWith(PathPtr prefix) const { return PathPtr(*this).startsWith(prefix); } inline bool Path::startsWith(PathPtr prefix) const { return PathPtr(*this).startsWith(prefix); }
inline bool Path::endsWith (PathPtr suffix) const { return PathPtr(*this).endsWith (suffix); } inline bool Path::endsWith (PathPtr suffix) const { return PathPtr(*this).endsWith (suffix); }
inline String Path::toString(bool absolute) const { return PathPtr(*this).toString(absolute); } inline String Path::toString(bool absolute) const { return PathPtr(*this).toString(absolute); }
...@@ -1020,6 +1026,7 @@ inline bool PathPtr::operator!=(PathPtr other) const { return !(*this == other); ...@@ -1020,6 +1026,7 @@ inline bool PathPtr::operator!=(PathPtr other) const { return !(*this == other);
inline bool PathPtr::operator> (PathPtr other) const { return other < *this; } inline bool PathPtr::operator> (PathPtr other) const { return other < *this; }
inline bool PathPtr::operator<=(PathPtr other) const { return !(other < *this); } inline bool PathPtr::operator<=(PathPtr other) const { return !(other < *this); }
inline bool PathPtr::operator>=(PathPtr other) const { return !(*this < other); } inline bool PathPtr::operator>=(PathPtr other) const { return !(*this < other); }
inline uint PathPtr::hashCode() const { return kj::hashCode(parts); }
inline String PathPtr::toWin32String(bool absolute) const { inline String PathPtr::toWin32String(bool absolute) const {
return toWin32StringImpl(absolute, false); return toWin32StringImpl(absolute, false);
} }
......