Commit aa9c3115 authored by Kenton Varda

Remove some more mutexes and atomics that I missed.

parent 91acb5b2
......@@ -47,7 +47,8 @@ public:
private:
Node& parent;
DeclName::Reader targetName;
kj::Lazy<kj::Maybe<Node&>> target;
bool initialized = false;
kj::Maybe<Node&> target;
};
class Compiler::Node: public NodeTranslator::Resolver {
......@@ -326,9 +327,11 @@ private:
// =======================================================================================
kj::Maybe<Compiler::Node&> Compiler::Alias::getTarget() {
  // Resolve the node this alias refers to, looking it up lazily on first use
  // and caching the result in `target`.
  //
  // This span previously contained BOTH the removed kj::Lazy-based body and
  // its replacement; the stale `target.get(...)` returned first, making the
  // new code unreachable and referencing `target` with its old type. Only the
  // committed single-threaded version is kept.
  //
  // NOTE(review): `initialized` is set *before* the lookup — presumably so a
  // lookup that recursively reaches this same alias terminates (observing an
  // empty Maybe) instead of recursing forever; confirm against callers.
  if (!initialized) {
    initialized = true;
    target = parent.lookup(targetName);
  }
  return target;
}
// =======================================================================================
......
......@@ -211,7 +211,7 @@ public:
private:
GlobalErrorReporter& errorReporter;
kj::Vector<kj::String> searchPath;
kj::MutexGuarded<std::map<kj::StringPtr, kj::Own<Module>>> modules;
std::map<kj::StringPtr, kj::Own<Module>> modules;
};
class ModuleLoader::ModuleImpl final: public Module {
......@@ -278,10 +278,8 @@ kj::Maybe<Module&> ModuleLoader::Impl::loadModule(
kj::String canonicalLocalName = canonicalizePath(localName);
kj::String canonicalSourceName = canonicalizePath(sourceName);
auto locked = modules.lockExclusive();
auto iter = locked->find(canonicalLocalName);
if (iter != locked->end()) {
auto iter = modules.find(canonicalLocalName);
if (iter != modules.end()) {
// Return existing file.
return *iter->second;
}
......@@ -294,7 +292,7 @@ kj::Maybe<Module&> ModuleLoader::Impl::loadModule(
auto module = kj::heap<ModuleImpl>(
*this, kj::mv(canonicalLocalName), kj::mv(canonicalSourceName));
auto& result = *module;
locked->insert(std::make_pair(result.getLocalName(), kj::mv(module)));
modules.insert(std::make_pair(result.getLocalName(), kj::mv(module)));
return result;
}
......
......@@ -896,18 +896,18 @@ private:
}
kj::Maybe<ExportId> writeDescriptor(rpc::CapDescriptor::Builder descriptor) override {
  // Record that this promised capability has been used before delegating to
  // the connection state. `receivedCall` is read later by resolve() to decide
  // whether an embargo is needed when the promise resolves locally.
  //
  // The diff residue here performed the store twice (once via the removed
  // __atomic_store_n, once plainly); only the committed plain store is kept.
  receivedCall = true;
  return connectionState->writeDescriptor(*cap, descriptor);
}
kj::Maybe<kj::Own<ClientHook>> writeTarget(
    rpc::MessageTarget::Builder target) override {
  // Mark that the capability was used (checked by resolve() for the embargo
  // decision), then forward to the connection state. The leftover
  // __atomic_store_n duplicate from the pre-commit code is removed.
  receivedCall = true;
  return connectionState->writeTarget(*cap, target);
}
kj::Own<ClientHook> getInnermostClient() override {
  // Flag usage of this capability (consumed by resolve() when deciding on an
  // embargo) and delegate. The duplicated atomic store left over from the
  // old multi-threaded code is removed.
  receivedCall = true;
  return connectionState->getInnermostClient(*cap);
}
......@@ -915,13 +915,13 @@ private:
Request<ObjectPointer, ObjectPointer> newCall(
    uint64_t interfaceId, uint16_t methodId, uint firstSegmentWordSize) override {
  // A call is being initiated through this (promise) capability; note it so
  // that resolve() can set up an embargo if the promise later resolves to a
  // local capability. Stale __atomic_store_n duplicate removed.
  receivedCall = true;
  return cap->newCall(interfaceId, methodId, firstSegmentWordSize);
}
VoidPromiseAndPipeline call(uint64_t interfaceId, uint16_t methodId,
                            kj::Own<CallContextHook>&& context) override {
  // Record that a call went out through this capability (read by resolve()
  // for the embargo decision) before delegating to the underlying cap.
  // Stale __atomic_store_n duplicate removed.
  receivedCall = true;
  return cap->call(interfaceId, methodId, kj::mv(context));
}
......@@ -951,8 +951,7 @@ private:
bool receivedCall = false;
void resolve(kj::Own<ClientHook> replacement) {
if (replacement->getBrand() != connectionState.get() &&
__atomic_load_n(&receivedCall, __ATOMIC_RELAXED)) {
if (replacement->getBrand() != connectionState.get() && receivedCall) {
// The new capability is hosted locally, not on the remote machine. And, we had made calls
// to the promise. We need to make sure those calls echo back to us before we allow new
// calls to go directly to the local capability, so we need to set a local embargo and send
......@@ -1984,8 +1983,10 @@ private:
// the RpcCallContext is now responsible for cleaning up the entry in the answer table, since
// a Finish message was already received.
if (__atomic_fetch_or(&cancellationFlags, CANCEL_REQUESTED, __ATOMIC_RELAXED) ==
CANCEL_ALLOWED) {
bool previouslyAllowedButNotRequested = cancellationFlags == CANCEL_ALLOWED;
cancellationFlags |= CANCEL_REQUESTED;
if (previouslyAllowedButNotRequested) {
// We just set CANCEL_REQUESTED, and CANCEL_ALLOWED was already set previously. Initiate
// the cancellation.
cancelFulfiller->fulfill();
......@@ -2092,15 +2093,17 @@ private:
// at creation.
KJ_REQUIRE(request == nullptr, "Must call releaseParams() before allowAsyncCancellation().");
if (__atomic_fetch_or(&cancellationFlags, CANCEL_ALLOWED, __ATOMIC_RELAXED) ==
CANCEL_REQUESTED) {
bool previouslyRequestedButNotAllowed = cancellationFlags == CANCEL_REQUESTED;
cancellationFlags |= CANCEL_ALLOWED;
if (previouslyRequestedButNotAllowed) {
// We just set CANCEL_ALLOWED, and CANCEL_REQUESTED was already set previously. Initiate
// the cancellation.
cancelFulfiller->fulfill();
}
}
bool isCanceled() override {
  // True once cancellation has been requested. A plain read suffices now that
  // the flags are no longer touched from multiple threads (per this commit);
  // the stale __atomic_load_n return that shadowed this line is removed.
  return cancellationFlags & CANCEL_REQUESTED;
}
kj::Own<CallContextHook> addRef() override {
return kj::addRef(*this);
......@@ -2133,8 +2136,7 @@ private:
};
uint8_t cancellationFlags = 0;
// When both flags are set, the cancellation process will begin. Must be manipulated atomically
// as it may be accessed from multiple threads.
// When both flags are set, the cancellation process will begin.
kj::Own<kj::PromiseFulfiller<void>> cancelFulfiller;
// Fulfilled when cancellation has been both requested and permitted. The fulfilled promise is
......@@ -2160,14 +2162,10 @@ private:
// answer table. Or we might even be responsible for removing the entire answer table
// entry.
// TODO(cleanup): This code and the code that calls it is really ugly. Moving a lock as
// a parameter? Yuck. Maybe when cancellation and thread-safety are removed this will
// get simpler?
kj::Own<PipelineHook> pipelineToRelease;
Answer answerToDelete;
if (__atomic_load_n(&cancellationFlags, __ATOMIC_RELAXED) & CANCEL_REQUESTED) {
if (cancellationFlags & CANCEL_REQUESTED) {
answerToDelete = kj::mv(connectionState->answers[questionId]);
// Erase from the table.
......
......@@ -126,11 +126,11 @@ public:
message);
// We intentionally only set hadErrors true if reportError() didn't throw.
__atomic_store_n(&parser.hadErrors, true, __ATOMIC_RELAXED);
parser.hadErrors = true;
}
bool hadErrors() override {
  // Report whether any error was recorded on the parser. The removed
  // __atomic_load_n variant returned first and made this line unreachable;
  // only the committed plain read is kept.
  return parser.hadErrors;
}
private:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment