Commit aa9c3115 authored by Kenton Varda

Remove some more mutexes and atomics that I missed.

parent 91acb5b2
@@ -47,7 +47,8 @@ public:
 private:
   Node& parent;
   DeclName::Reader targetName;
-  kj::Lazy<kj::Maybe<Node&>> target;
+  bool initialized = false;
+  kj::Maybe<Node&> target;
 };
 
 class Compiler::Node: public NodeTranslator::Resolver {
@@ -326,9 +327,11 @@ private:
 // =======================================================================================
 
 kj::Maybe<Compiler::Node&> Compiler::Alias::getTarget() {
-  return target.get([this](kj::SpaceFor<kj::Maybe<Node&>>& space) {
-    return space.construct(parent.lookup(targetName));
-  });
+  if (!initialized) {
+    initialized = true;
+    target = parent.lookup(targetName);
+  }
+  return target;
 }
 
 // =======================================================================================
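Note: kj::Lazy<T> performs thread-safe once-only initialization, which is overkill now that the compiler is confined to a single thread; a plain flag plus a cached value does the same job. A minimal sketch of the pattern under that single-thread assumption, using std:: types and a hypothetical lookup table in place of the real Node resolution:

#include <map>
#include <optional>
#include <string>

// Illustrative only: AliasSketch mimics Compiler::Alias's caching, but the
// names and the lookup table below are hypothetical, not compiler internals.
class AliasSketch {
public:
  explicit AliasSketch(std::string name): targetName(std::move(name)) {}

  // First call computes and caches the target; later calls return the cache.
  // Only safe because a single thread ever calls getTarget().
  const std::optional<int>& getTarget() {
    if (!initialized) {
      // Set the flag before looking up, as the diff does: a lookup that
      // finds nothing is cached as "no target" rather than retried.
      initialized = true;
      target = lookup(targetName);
    }
    return target;
  }

private:
  static std::optional<int> lookup(const std::string& name) {
    static const std::map<std::string, int> table = {{"Foo", 1}, {"Bar", 2}};
    auto iter = table.find(name);
    return iter == table.end() ? std::nullopt : std::optional<int>(iter->second);
  }

  std::string targetName;
  bool initialized = false;   // replaces kj::Lazy's internal once-flag
  std::optional<int> target;  // replaces the lazily constructed kj::Maybe
};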
...
@@ -211,7 +211,7 @@ public:
 private:
   GlobalErrorReporter& errorReporter;
   kj::Vector<kj::String> searchPath;
-  kj::MutexGuarded<std::map<kj::StringPtr, kj::Own<Module>>> modules;
+  std::map<kj::StringPtr, kj::Own<Module>> modules;
 };
 
 class ModuleLoader::ModuleImpl final: public Module {
@@ -278,10 +278,8 @@ kj::Maybe<Module&> ModuleLoader::Impl::loadModule(
   kj::String canonicalLocalName = canonicalizePath(localName);
   kj::String canonicalSourceName = canonicalizePath(sourceName);
 
-  auto locked = modules.lockExclusive();
-
-  auto iter = locked->find(canonicalLocalName);
-  if (iter != locked->end()) {
+  auto iter = modules.find(canonicalLocalName);
+  if (iter != modules.end()) {
     // Return existing file.
     return *iter->second;
   }
@@ -294,7 +292,7 @@ kj::Maybe<Module&> ModuleLoader::Impl::loadModule(
   auto module = kj::heap<ModuleImpl>(
       *this, kj::mv(canonicalLocalName), kj::mv(canonicalSourceName));
   auto& result = *module;
-  locked->insert(std::make_pair(result.getLocalName(), kj::mv(module)));
+  modules.insert(std::make_pair(result.getLocalName(), kj::mv(module)));
   return result;
 }
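Note: kj::MutexGuarded<T> only hands out its contents through a lock, which is why the old code had to carry `locked` through the function; with the loader now single-threaded the guard is pure overhead. One detail survives the change: the map key is a non-owning kj::StringPtr into the canonical name stored inside the Module itself, so the key stays valid exactly as long as its entry does. A toy sketch of that find-or-insert shape with std:: types (ModuleSketch and load() are hypothetical stand-ins, not the real loader API):

#include <map>
#include <memory>
#include <string>
#include <string_view>

struct ModuleSketch {
  explicit ModuleSketch(std::string name): localName(std::move(name)) {}
  std::string localName;  // owns the bytes the map key will point into
};

class ModuleCacheSketch {
public:
  ModuleSketch& load(std::string canonicalName) {
    auto iter = modules.find(canonicalName);
    if (iter != modules.end()) {
      return *iter->second;  // return existing entry, as in loadModule()
    }
    auto module = std::make_unique<ModuleSketch>(std::move(canonicalName));
    auto& result = *module;
    // The key views result.localName, which the map entry keeps alive, so
    // no copy of the name is made for the key (mirroring kj::StringPtr).
    modules.emplace(std::string_view(result.localName), std::move(module));
    return result;
  }

private:
  std::map<std::string_view, std::unique_ptr<ModuleSketch>> modules;
};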
...
@@ -896,18 +896,18 @@ private:
   }
 
   kj::Maybe<ExportId> writeDescriptor(rpc::CapDescriptor::Builder descriptor) override {
-    __atomic_store_n(&receivedCall, true, __ATOMIC_RELAXED);
+    receivedCall = true;
     return connectionState->writeDescriptor(*cap, descriptor);
   }
 
   kj::Maybe<kj::Own<ClientHook>> writeTarget(
       rpc::MessageTarget::Builder target) override {
-    __atomic_store_n(&receivedCall, true, __ATOMIC_RELAXED);
+    receivedCall = true;
     return connectionState->writeTarget(*cap, target);
   }
 
   kj::Own<ClientHook> getInnermostClient() override {
-    __atomic_store_n(&receivedCall, true, __ATOMIC_RELAXED);
+    receivedCall = true;
     return connectionState->getInnermostClient(*cap);
   }
 
@@ -915,13 +915,13 @@ private:
   Request<ObjectPointer, ObjectPointer> newCall(
       uint64_t interfaceId, uint16_t methodId, uint firstSegmentWordSize) override {
-    __atomic_store_n(&receivedCall, true, __ATOMIC_RELAXED);
+    receivedCall = true;
     return cap->newCall(interfaceId, methodId, firstSegmentWordSize);
   }
 
   VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId,
                               kj::Own<CallContextHook>&& context) override {
-    __atomic_store_n(&receivedCall, true, __ATOMIC_RELAXED);
+    receivedCall = true;
     return cap->call(interfaceId, methodId, kj::mv(context));
   }
 
@@ -951,8 +951,7 @@ private:
   bool receivedCall = false;
 
   void resolve(kj::Own<ClientHook> replacement) {
-    if (replacement->getBrand() != connectionState.get() &&
-        __atomic_load_n(&receivedCall, __ATOMIC_RELAXED)) {
+    if (replacement->getBrand() != connectionState.get() && receivedCall) {
       // The new capability is hosted locally, not on the remote machine. And, we had made calls
       // to the promise. We need to make sure those calls echo back to us before we allow new
       // calls to go directly to the local capability, so we need to set a local embargo and send
@@ -1984,8 +1983,10 @@ private:
       // the RpcCallContext is now responsible for cleaning up the entry in the answer table, since
       // a Finish message was already received.
-      if (__atomic_fetch_or(&cancellationFlags, CANCEL_REQUESTED, __ATOMIC_RELAXED) ==
-          CANCEL_ALLOWED) {
+      bool previouslyAllowedButNotRequested = cancellationFlags == CANCEL_ALLOWED;
+      cancellationFlags |= CANCEL_REQUESTED;
+      if (previouslyAllowedButNotRequested) {
        // We just set CANCEL_REQUESTED, and CANCEL_ALLOWED was already set previously. Initiate
        // the cancellation.
        cancelFulfiller->fulfill();
@@ -2092,15 +2093,17 @@ private:
     // at creation.
     KJ_REQUIRE(request == nullptr, "Must call releaseParams() before allowAsyncCancellation().");
-    if (__atomic_fetch_or(&cancellationFlags, CANCEL_ALLOWED, __ATOMIC_RELAXED) ==
-        CANCEL_REQUESTED) {
+    bool previouslyRequestedButNotAllowed = cancellationFlags == CANCEL_REQUESTED;
+    cancellationFlags |= CANCEL_ALLOWED;
+    if (previouslyRequestedButNotAllowed) {
      // We just set CANCEL_ALLOWED, and CANCEL_REQUESTED was already set previously. Initiate
      // the cancellation.
      cancelFulfiller->fulfill();
    }
  }
 
  bool isCanceled() override {
-    return __atomic_load_n(&cancellationFlags, __ATOMIC_RELAXED) & CANCEL_REQUESTED;
+    return cancellationFlags & CANCEL_REQUESTED;
  }
 
  kj::Own<CallContextHook> addRef() override {
    return kj::addRef(*this);
@@ -2133,8 +2136,7 @@ private:
  };
 
  uint8_t cancellationFlags = 0;
-  // When both flags are set, the cancellation process will begin. Must be manipulated atomically
-  // as it may be accessed from multiple threads.
+  // When both flags are set, the cancellation process will begin.
 
  kj::Own<kj::PromiseFulfiller<void>> cancelFulfiller;
  // Fulfilled when cancellation has been both requested and permitted. The fulfilled promise is
@@ -2160,14 +2162,10 @@ private:
        // answer table. Or we might even be responsible for removing the entire answer table
        // entry.
 
-        // TODO(cleanup): This code and the code that calls it is really ugly. Moving a lock as
-        //   a parameter? Yuck. Maybe when cancellation and thread-safety are removed this will
-        //   get simpler?
-
        kj::Own<PipelineHook> pipelineToRelease;
        Answer answerToDelete;
 
-        if (__atomic_load_n(&cancellationFlags, __ATOMIC_RELAXED) & CANCEL_REQUESTED) {
+        if (cancellationFlags & CANCEL_REQUESTED) {
          answerToDelete = kj::mv(connectionState->answers[questionId]);
          // Erase from the table.
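Note: the two-flag cancellation handshake is the subtle part of this file's changes. Each side sets its own bit and fires the fulfiller only if the other bit, and only the other bit, was already set, so fulfill() runs exactly once. The old code read this off the value __atomic_fetch_or returned (the flags before the OR); the new code makes the same check explicit. A self-contained, single-threaded sketch, with fireCancellation() standing in for cancelFulfiller->fulfill():

#include <cassert>
#include <cstdint>

constexpr uint8_t CANCEL_REQUESTED = 1;
constexpr uint8_t CANCEL_ALLOWED = 2;

struct ContextSketch {
  uint8_t cancellationFlags = 0;
  int fireCount = 0;

  void fireCancellation() { ++fireCount; }  // stand-in for the fulfiller

  // Old form: if (__atomic_fetch_or(&flags, CANCEL_REQUESTED, ...) == CANCEL_ALLOWED).
  // fetch_or returns the PRIOR value, so "== CANCEL_ALLOWED" means ALLOWED was
  // set and REQUESTED was not: exactly what the two explicit steps test.
  void requestCancel() {
    bool previouslyAllowedButNotRequested = cancellationFlags == CANCEL_ALLOWED;
    cancellationFlags |= CANCEL_REQUESTED;
    if (previouslyAllowedButNotRequested) fireCancellation();
  }

  void allowCancel() {
    bool previouslyRequestedButNotAllowed = cancellationFlags == CANCEL_REQUESTED;
    cancellationFlags |= CANCEL_ALLOWED;
    if (previouslyRequestedButNotAllowed) fireCancellation();
  }
};

int main() {
  ContextSketch a;          // request first, then allow
  a.requestCancel();        // only REQUESTED set: nothing fires
  a.allowCancel();          // second bit arrives: fires once
  a.allowCancel();          // repeat calls never fire again
  assert(a.fireCount == 1);

  ContextSketch b;          // allow first, then request: same outcome
  b.allowCancel();
  b.requestCancel();
  assert(b.fireCount == 1);
  return 0;
}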
...
@@ -126,11 +126,11 @@ public:
         message);
 
     // We intentionally only set hadErrors true if reportError() didn't throw.
-    __atomic_store_n(&parser.hadErrors, true, __ATOMIC_RELAXED);
+    parser.hadErrors = true;
   }
 
   bool hadErrors() override {
-    return __atomic_load_n(&parser.hadErrors, __ATOMIC_RELAXED);
+    return parser.hadErrors;
   }
 
 private:
...