Commit 850af66a authored by Kenton Varda

Add KJ_ prefix to logging/assert macros.

parent a7933e07
......@@ -168,7 +168,7 @@ kj::ArrayPtr<const kj::ArrayPtr<const word>> BuilderArena::getSegmentsForOutput(
return kj::arrayPtr(&segment0ForOutput, 1);
}
} else {
DASSERT(moreSegments->forOutput.size() == moreSegments->builders.size() + 1,
KJ_DASSERT(moreSegments->forOutput.size() == moreSegments->builders.size() + 1,
"moreSegments->forOutput wasn't resized correctly when the last builder was added.",
moreSegments->forOutput.size(), moreSegments->builders.size());
......
......@@ -181,7 +181,7 @@ struct UseScratch {
word* words;
ScratchSpace() {
REQUIRE(scratchCounter < 6, "Too many scratch spaces needed at once.");
KJ_REQUIRE(scratchCounter < 6, "Too many scratch spaces needed at once.");
words = scratchSpace + scratchCounter++ * SCRATCH_SIZE;
}
~ScratchSpace() {
......
......@@ -128,8 +128,8 @@ void TextBlob::allocate(size_t textSize, size_t branchCount,
}
void TextBlob::fill(char* textPos, Branch* branchesPos) {
ASSERT(textPos == text.end(), textPos - text.end());
ASSERT(branchesPos == branches.end(), branchesPos - branches.end());
KJ_ASSERT(textPos == text.end(), textPos - text.end());
KJ_ASSERT(branchesPos == branches.end(), branchesPos - branches.end());
}
template <typename First, typename... Rest>
......@@ -217,7 +217,7 @@ Text::Reader getUnqualifiedName(Schema schema) {
return nested.getName();
}
}
FAIL_REQUIRE("A schema Node's supposed scope did not contain the node as a NestedNode.");
KJ_FAIL_REQUIRE("A schema Node's supposed scope did not contain the node as a NestedNode.");
return "(?)";
}
......@@ -365,22 +365,22 @@ TextBlob genValue(schema::Type::Reader type, schema::Value::Reader value, Schema
case schema::Value::Body::TEXT_VALUE: return text(DynamicValue::Reader(body.getTextValue()));
case schema::Value::Body::DATA_VALUE: return text(DynamicValue::Reader(body.getDataValue()));
case schema::Value::Body::LIST_VALUE: {
REQUIRE(type.getBody().which() == schema::Type::Body::LIST_TYPE, "type/value mismatch");
KJ_REQUIRE(type.getBody().which() == schema::Type::Body::LIST_TYPE, "type/value mismatch");
auto value = body.getListValue<DynamicList>(
ListSchema::of(type.getBody().getListType(), scope));
return text(value);
}
case schema::Value::Body::ENUM_VALUE: {
REQUIRE(type.getBody().which() == schema::Type::Body::ENUM_TYPE, "type/value mismatch");
KJ_REQUIRE(type.getBody().which() == schema::Type::Body::ENUM_TYPE, "type/value mismatch");
auto enumNode = scope.getDependency(type.getBody().getEnumType()).asEnum().getProto();
auto enumType = enumNode.getBody().getEnumNode();
auto enumerants = enumType.getEnumerants();
REQUIRE(body.getEnumValue() < enumerants.size(),
KJ_REQUIRE(body.getEnumValue() < enumerants.size(),
"Enum value out-of-range.", body.getEnumValue(), enumNode.getDisplayName());
return text(enumerants[body.getEnumValue()].getName());
}
case schema::Value::Body::STRUCT_VALUE: {
REQUIRE(type.getBody().which() == schema::Type::Body::STRUCT_TYPE, "type/value mismatch");
KJ_REQUIRE(type.getBody().which() == schema::Type::Body::STRUCT_TYPE, "type/value mismatch");
auto value = body.getStructValue<DynamicStruct>(
scope.getDependency(type.getBody().getStructType()).asStruct());
return text(value);
......@@ -400,7 +400,7 @@ TextBlob genAnnotation(schema::Annotation::Reader annotation,
const char* prefix = " ", const char* suffix = "") {
auto decl = schemaLoader.get(annotation.getId());
auto body = decl.getProto().getBody();
REQUIRE(body.which() == schema::Node::Body::ANNOTATION_NODE);
KJ_REQUIRE(body.which() == schema::Node::Body::ANNOTATION_NODE);
auto annDecl = body.getAnnotationNode();
return text(prefix, "$", nodeName(decl, scope), "(",
......@@ -468,12 +468,12 @@ TextBlob genDecl(Schema schema, Text::Reader name, uint64_t scopeId, Indent inde
auto proto = schema.getProto();
if (proto.getScopeId() != scopeId) {
// This appears to be an alias for something declared elsewhere.
FAIL_REQUIRE("Aliases not implemented.");
KJ_FAIL_REQUIRE("Aliases not implemented.");
}
switch (proto.getBody().which()) {
case schema::Node::Body::FILE_NODE:
FAIL_REQUIRE("Encountered nested file node.");
KJ_FAIL_REQUIRE("Encountered nested file node.");
break;
case schema::Node::Body::STRUCT_NODE: {
auto body = proto.getBody().getStructNode();
......@@ -578,7 +578,7 @@ TextBlob genNestedDecls(Schema schema, Indent indent) {
TextBlob genFile(Schema file) {
auto proto = file.getProto();
auto body = proto.getBody();
REQUIRE(body.which() == schema::Node::Body::FILE_NODE, "Expected a file node.",
KJ_REQUIRE(body.which() == schema::Node::Body::FILE_NODE, "Expected a file node.",
(uint)body.which());
return text(
......
This diff is collapsed.
......@@ -279,7 +279,7 @@ UnionState initUnion(Func&& initializer) {
initializer(builder.getRoot<StructType>());
kj::ArrayPtr<const word> segment = builder.getSegmentsForOutput()[0];
ASSERT(segment.size() > 2, segment.size());
KJ_ASSERT(segment.size() > 2, segment.size());
// Find the offset of the first set bit after the union discriminants.
int offset = 0;
......
......@@ -102,12 +102,12 @@ struct WirePointer {
}
KJ_ALWAYS_INLINE(WordCount farPositionInSegment() const) {
DREQUIRE(kind() == FAR,
KJ_DREQUIRE(kind() == FAR,
"positionInSegment() should only be called on FAR pointers.");
return (offsetAndKind.get() >> 3) * WORDS;
}
KJ_ALWAYS_INLINE(bool isDoubleFar() const) {
DREQUIRE(kind() == FAR,
KJ_DREQUIRE(kind() == FAR,
"isDoubleFar() should only be called on FAR pointers.");
return (offsetAndKind.get() >> 2) & 1;
}
......@@ -155,12 +155,12 @@ struct WirePointer {
}
KJ_ALWAYS_INLINE(void set(FieldSize es, ElementCount ec)) {
DREQUIRE(ec < (1 << 29) * ELEMENTS, "Lists are limited to 2**29 elements.");
KJ_DREQUIRE(ec < (1 << 29) * ELEMENTS, "Lists are limited to 2**29 elements.");
elementSizeAndCount.set(((ec / ELEMENTS) << 3) | static_cast<int>(es));
}
KJ_ALWAYS_INLINE(void setInlineComposite(WordCount wc)) {
DREQUIRE(wc < (1 << 29) * WORDS, "Inline composite lists are limited to 2**29 words.");
KJ_DREQUIRE(wc < (1 << 29) * WORDS, "Inline composite lists are limited to 2**29 words.");
elementSizeAndCount.set(((wc / WORDS) << 3) |
static_cast<int>(FieldSize::INLINE_COMPOSITE));
}
......@@ -380,7 +380,7 @@ struct WireHelpers {
case FieldSize::INLINE_COMPOSITE: {
WirePointer* elementTag = reinterpret_cast<WirePointer*>(ptr);
ASSERT(elementTag->kind() == WirePointer::STRUCT,
KJ_ASSERT(elementTag->kind() == WirePointer::STRUCT,
"Don't know how to handle non-STRUCT inline composite.");
WordCount dataSize = elementTag->structRef.dataSize.get();
WirePointerCount pointerCount = elementTag->structRef.ptrCount.get();
......@@ -553,6 +553,7 @@ struct WireHelpers {
}
// -----------------------------------------------------------------
// Copy from an unchecked message.
static KJ_ALWAYS_INLINE(
void copyStruct(SegmentBuilder* segment, word* dst, const word* src,
......@@ -640,7 +641,7 @@ struct WireHelpers {
const word* srcElement = srcPtr + POINTER_SIZE_IN_WORDS;
word* dstElement = dstPtr + POINTER_SIZE_IN_WORDS;
ASSERT(srcTag->kind() == WirePointer::STRUCT,
KJ_ASSERT(srcTag->kind() == WirePointer::STRUCT,
"INLINE_COMPOSITE of lists is not yet supported.");
uint n = srcTag->inlineCompositeListElementCount() / ELEMENTS;
......@@ -656,7 +657,7 @@ struct WireHelpers {
break;
}
default:
FAIL_REQUIRE("Copy source message contained unexpected kind.");
KJ_FAIL_REQUIRE("Copy source message contained unexpected kind.");
break;
}
......@@ -689,7 +690,7 @@ struct WireHelpers {
// Darn, need a double-far.
SegmentBuilder* farSegment = srcSegment->getArena()->getSegmentWithAvailable(2 * WORDS);
landingPad = reinterpret_cast<WirePointer*>(farSegment->allocate(2 * WORDS));
DASSERT(landingPad != nullptr,
KJ_DASSERT(landingPad != nullptr,
"getSegmentWithAvailable() returned segment without space available.");
landingPad[0].setFar(false, srcSegment->getOffsetTo(src->target()));
......@@ -796,7 +797,7 @@ struct WireHelpers {
static KJ_ALWAYS_INLINE(ListBuilder initListPointer(
WirePointer* ref, SegmentBuilder* segment, ElementCount elementCount,
FieldSize elementSize)) {
DREQUIRE(elementSize != FieldSize::INLINE_COMPOSITE,
KJ_DREQUIRE(elementSize != FieldSize::INLINE_COMPOSITE,
"Should have called initStructListPointer() instead.");
BitCount dataSize = dataBitsPerElement(elementSize) * ELEMENTS;
......@@ -848,7 +849,7 @@ struct WireHelpers {
static KJ_ALWAYS_INLINE(ListBuilder getWritableListPointer(
WirePointer* origRef, SegmentBuilder* origSegment, FieldSize elementSize,
const word* defaultValue)) {
DREQUIRE(elementSize != FieldSize::INLINE_COMPOSITE,
KJ_DREQUIRE(elementSize != FieldSize::INLINE_COMPOSITE,
"Use getStructList{Element,Field}() for structs.");
if (origRef->isNull()) {
......@@ -886,7 +887,7 @@ struct WireHelpers {
// Read the tag to get the actual element count.
WirePointer* tag = reinterpret_cast<WirePointer*>(ptr);
REQUIRE(tag->kind() == WirePointer::STRUCT,
KJ_REQUIRE(tag->kind() == WirePointer::STRUCT,
"INLINE_COMPOSITE list with non-STRUCT elements not supported.");
ptr += POINTER_SIZE_IN_WORDS;
......@@ -915,7 +916,7 @@ struct WireHelpers {
break;
case FieldSize::INLINE_COMPOSITE:
FAIL_ASSERT("Can't get here.");
KJ_FAIL_ASSERT("Can't get here.");
break;
}
......@@ -1148,11 +1149,11 @@ struct WireHelpers {
} else {
// If oldSize were POINTER or EIGHT_BYTES then the preferred size must be
// INLINE_COMPOSITE because any other compatible size would not require an upgrade.
ASSERT(oldSize < FieldSize::EIGHT_BYTES);
KJ_ASSERT(oldSize < FieldSize::EIGHT_BYTES);
// If the preferred size were BIT then oldSize must be VOID, but we handled that case
// above.
ASSERT(elementSize.preferredListEncoding >= FieldSize::BIT);
KJ_ASSERT(elementSize.preferredListEncoding >= FieldSize::BIT);
// OK, so the expected list elements are all data and between 1 byte and 1 word each,
// and the old element are data between 1 bit and 4 bytes. We're upgrading from one
......@@ -1226,9 +1227,9 @@ struct WireHelpers {
} else {
word* ptr = followFars(ref, segment);
REQUIRE(ref->kind() == WirePointer::LIST,
KJ_REQUIRE(ref->kind() == WirePointer::LIST,
"Called getText{Field,Element}() but existing pointer is not a list.");
REQUIRE(ref->listRef.elementSize() == FieldSize::BYTE,
KJ_REQUIRE(ref->listRef.elementSize() == FieldSize::BYTE,
"Called getText{Field,Element}() but existing list pointer is not byte-sized.");
// Subtract 1 from the size for the NUL terminator.
......@@ -1263,9 +1264,9 @@ struct WireHelpers {
} else {
word* ptr = followFars(ref, segment);
REQUIRE(ref->kind() == WirePointer::LIST,
KJ_REQUIRE(ref->kind() == WirePointer::LIST,
"Called getData{Field,Element}() but existing pointer is not a list.");
REQUIRE(ref->listRef.elementSize() == FieldSize::BYTE,
KJ_REQUIRE(ref->listRef.elementSize() == FieldSize::BYTE,
"Called getData{Field,Element}() but existing list pointer is not byte-sized.");
return Data::Builder(reinterpret_cast<char*>(ptr), ref->listRef.elementCount() / ELEMENTS);
......@@ -1291,7 +1292,7 @@ struct WireHelpers {
if (ref->listRef.elementSize() == FieldSize::INLINE_COMPOSITE) {
// Read the tag to get the actual element count.
WirePointer* tag = reinterpret_cast<WirePointer*>(ptr);
REQUIRE(tag->kind() == WirePointer::STRUCT,
KJ_REQUIRE(tag->kind() == WirePointer::STRUCT,
"INLINE_COMPOSITE list with non-STRUCT elements not supported.");
// First list element is at tag + 1 pointer.
......@@ -1364,7 +1365,7 @@ struct WireHelpers {
case 32: elementSize = FieldSize::FOUR_BYTES; break;
case 64: elementSize = FieldSize::EIGHT_BYTES; break;
default:
FAIL_ASSERT("invalid list step size", value.step * ELEMENTS / BITS);
KJ_FAIL_ASSERT("invalid list step size", value.step * ELEMENTS / BITS);
break;
}
......@@ -1951,7 +1952,7 @@ ObjectReader StructReader::getObjectField(
}
const word* StructReader::getUncheckedPointer(WirePointerCount ptrIndex) const {
REQUIRE(segment == nullptr, "getUncheckedPointer() only allowed on unchecked messages.");
KJ_REQUIRE(segment == nullptr, "getUncheckedPointer() only allowed on unchecked messages.");
return reinterpret_cast<const word*>(pointers + ptrIndex);
}
......@@ -2142,7 +2143,7 @@ StructReader ListReader::getStructElement(ElementCount index) const {
reinterpret_cast<const WirePointer*>(structData + structDataSize / BITS_PER_BYTE);
// This check should pass if there are no bugs in the list pointer validation code.
DASSERT(structPointerCount == 0 * POINTERS ||
KJ_DASSERT(structPointerCount == 0 * POINTERS ||
(uintptr_t)structPointers % sizeof(WirePointer) == 0,
"Pointer segment of struct list element not aligned.");
......@@ -2153,7 +2154,7 @@ StructReader ListReader::getStructElement(ElementCount index) const {
}
static const WirePointer* checkAlignment(const void* ptr) {
DASSERT((uintptr_t)ptr % sizeof(WirePointer) == 0,
KJ_DASSERT((uintptr_t)ptr % sizeof(WirePointer) == 0,
"Pointer segment of struct list element not aligned.");
return reinterpret_cast<const WirePointer*>(ptr);
}
......
......@@ -80,10 +80,10 @@ internal::SegmentBuilder* MessageBuilder::getRootSegment() {
WordCount ptrSize = 1 * POINTERS * WORDS_PER_POINTER;
internal::SegmentBuilder* segment = arena()->getSegmentWithAvailable(ptrSize);
ASSERT(segment->getSegmentId() == internal::SegmentId(0),
KJ_ASSERT(segment->getSegmentId() == internal::SegmentId(0),
"First allocated word of new arena was not in segment ID 0.");
word* location = segment->allocate(ptrSize);
ASSERT(location == segment->getPtrUnchecked(0 * WORDS),
KJ_ASSERT(location == segment->getPtrUnchecked(0 * WORDS),
"First allocated word of new arena was not the first word in its segment.");
return segment;
}
......@@ -146,10 +146,10 @@ MallocMessageBuilder::MallocMessageBuilder(
kj::ArrayPtr<word> firstSegment, AllocationStrategy allocationStrategy)
: nextSize(firstSegment.size()), allocationStrategy(allocationStrategy),
ownFirstSegment(false), returnedFirstSegment(false), firstSegment(firstSegment.begin()) {
REQUIRE(firstSegment.size() > 0, "First segment size must be non-zero.");
KJ_REQUIRE(firstSegment.size() > 0, "First segment size must be non-zero.");
// Checking just the first word should catch most cases of failing to zero the segment.
REQUIRE(*reinterpret_cast<uint64_t*>(firstSegment.begin()) == 0,
KJ_REQUIRE(*reinterpret_cast<uint64_t*>(firstSegment.begin()) == 0,
"First segment must be zeroed.");
}
......@@ -161,7 +161,7 @@ MallocMessageBuilder::~MallocMessageBuilder() {
// Must zero first segment.
kj::ArrayPtr<const kj::ArrayPtr<const word>> segments = getSegmentsForOutput();
if (segments.size() > 0) {
ASSERT(segments[0].begin() == firstSegment,
KJ_ASSERT(segments[0].begin() == firstSegment,
"First segment in getSegmentsForOutput() is not the first segment allocated?");
memset(firstSegment, 0, segments[0].size() * sizeof(word));
}
......@@ -218,12 +218,12 @@ FlatMessageBuilder::FlatMessageBuilder(kj::ArrayPtr<word> array): array(array),
FlatMessageBuilder::~FlatMessageBuilder() {}
void FlatMessageBuilder::requireFilled() {
REQUIRE(getSegmentsForOutput()[0].end() == array.end(),
KJ_REQUIRE(getSegmentsForOutput()[0].end() == array.end(),
"FlatMessageBuilder's buffer was too large.");
}
kj::ArrayPtr<word> FlatMessageBuilder::allocateSegment(uint minimumSize) {
REQUIRE(!allocated, "FlatMessageBuilder's buffer was not large enough.");
KJ_REQUIRE(!allocated, "FlatMessageBuilder's buffer was not large enough.");
allocated = true;
return array;
}
......
......@@ -84,7 +84,7 @@ public:
nodeName = node.getDisplayName();
dependencies.clear();
CONTEXT("validating schema node", nodeName, (uint)node.getBody().which());
KJ_CONTEXT("validating schema node", nodeName, (uint)node.getBody().which());
switch (node.getBody().which()) {
case schema::Node::Body::FILE_NODE:
......@@ -119,7 +119,7 @@ public:
for (auto& dep: dependencies) {
result[pos++] = dep.second;
}
DASSERT(pos == *count);
KJ_DASSERT(pos == *count);
return result;
}
......@@ -131,7 +131,7 @@ public:
for (auto& member: members) {
result[pos++] = internal::RawSchema::MemberInfo(member.first.first, member.second);
}
DASSERT(pos == *count);
KJ_DASSERT(pos == *count);
return result;
}
......@@ -218,7 +218,7 @@ private:
uint index = 0;
for (auto member: members) {
CONTEXT("validating struct member", member.getName());
KJ_CONTEXT("validating struct member", member.getName());
validate(member, sawCodeOrder, sawOrdinal, dataSizeInBits, pointerCount, 0, index++);
}
}
......@@ -270,7 +270,7 @@ private:
uint subIndex = 0;
for (auto uMember: uMembers) {
CONTEXT("validating union member", uMember.getName());
KJ_CONTEXT("validating union member", uMember.getName());
VALIDATE_SCHEMA(
uMember.getBody().which() == schema::StructNode::Member::Body::FIELD_MEMBER,
"Union members must be fields.");
......@@ -307,7 +307,7 @@ private:
uint index = 0;
for (auto method: methods) {
CONTEXT("validating method", method.getName());
KJ_CONTEXT("validating method", method.getName());
validateMemberName(method.getName(), 0, index++);
VALIDATE_SCHEMA(method.getCodeOrder() < methods.size() &&
......@@ -317,7 +317,7 @@ private:
auto params = method.getParams();
for (auto param: params) {
CONTEXT("validating parameter", param.getName());
KJ_CONTEXT("validating parameter", param.getName());
uint dummy1;
bool dummy2;
validate(param.getType(), param.getDefaultValue(), &dummy1, &dummy2);
......@@ -444,10 +444,10 @@ public:
bool shouldReplace(schema::Node::Reader existingNode, schema::Node::Reader replacement,
bool replacementIsNative) {
CONTEXT("checking compatibility with previously-loaded node of the same id",
existingNode.getDisplayName());
KJ_CONTEXT("checking compatibility with previously-loaded node of the same id",
existingNode.getDisplayName());
DREQUIRE(existingNode.getId() == replacement.getId());
KJ_DREQUIRE(existingNode.getId() == replacement.getId());
nodeName = existingNode.getDisplayName();
compatibility = EQUIVALENT;
......@@ -593,7 +593,7 @@ private:
void checkCompatibility(schema::StructNode::Member::Reader member,
schema::StructNode::Member::Reader replacement) {
CONTEXT("comparing struct member", member.getName());
KJ_CONTEXT("comparing struct member", member.getName());
switch (member.getBody().which()) {
case schema::StructNode::Member::Body::FIELD_MEMBER: {
......@@ -665,7 +665,7 @@ private:
void checkCompatibility(schema::InterfaceNode::Method::Reader method,
schema::InterfaceNode::Method::Reader replacement) {
CONTEXT("comparing method", method.getName());
KJ_CONTEXT("comparing method", method.getName());
auto params = method.getParams();
auto replacementParams = replacement.getParams();
......@@ -681,7 +681,7 @@ private:
auto param = params[i];
auto replacementParam = replacementParams[i];
CONTEXT("comparing parameter", param.getName());
KJ_CONTEXT("comparing parameter", param.getName());
checkCompatibility(param.getType(), replacementParam.getType(),
NO_UPGRADE_TO_STRUCT);
......@@ -1026,7 +1026,7 @@ internal::RawSchema* SchemaLoader::Impl::loadNative(const internal::RawSchema* n
if (slot == nullptr) {
slot = allocate<internal::RawSchema>();
} else if (slot->canCastTo != nullptr) {
REQUIRE(slot->canCastTo == nativeSchema,
KJ_REQUIRE(slot->canCastTo == nativeSchema,
"two different compiled-in type have the same type ID",
reader.getId(), reader.getDisplayName(),
readMessageUnchecked<schema::Node>(slot->canCastTo->encodedNode).getDisplayName());
......@@ -1079,7 +1079,7 @@ internal::RawSchema* SchemaLoader::Impl::loadEmpty(
case schema::Node::Body::FILE_NODE:
case schema::Node::Body::CONST_NODE:
case schema::Node::Body::ANNOTATION_NODE:
FAIL_REQUIRE("Not a type.");
KJ_FAIL_REQUIRE("Not a type.");
break;
}
......@@ -1111,7 +1111,7 @@ SchemaLoader::~SchemaLoader() {}
Schema SchemaLoader::get(uint64_t id) const {
internal::RawSchema* raw = impl->tryGet(id);
REQUIRE(raw != nullptr, "no schema node loaded for id", id);
KJ_REQUIRE(raw != nullptr, "no schema node loaded for id", id);
return Schema(raw);
}
......
......@@ -51,33 +51,33 @@ Schema Schema::getDependency(uint64_t id) const {
}
}
FAIL_REQUIRE("Requested ID not found in dependency table.", id);
KJ_FAIL_REQUIRE("Requested ID not found in dependency table.", id);
return Schema();
}
StructSchema Schema::asStruct() const {
REQUIRE(getProto().getBody().which() == schema::Node::Body::STRUCT_NODE,
KJ_REQUIRE(getProto().getBody().which() == schema::Node::Body::STRUCT_NODE,
"Tried to use non-struct schema as a struct.",
getProto().getDisplayName());
return StructSchema(raw);
}
EnumSchema Schema::asEnum() const {
REQUIRE(getProto().getBody().which() == schema::Node::Body::ENUM_NODE,
KJ_REQUIRE(getProto().getBody().which() == schema::Node::Body::ENUM_NODE,
"Tried to use non-enum schema as an enum.",
getProto().getDisplayName());
return EnumSchema(raw);
}
InterfaceSchema Schema::asInterface() const {
REQUIRE(getProto().getBody().which() == schema::Node::Body::INTERFACE_NODE,
KJ_REQUIRE(getProto().getBody().which() == schema::Node::Body::INTERFACE_NODE,
"Tried to use non-interface schema as an interface.",
getProto().getDisplayName());
return InterfaceSchema(raw);
}
void Schema::requireUsableAs(const internal::RawSchema* expected) {
REQUIRE(raw == expected ||
KJ_REQUIRE(raw == expected ||
(raw != nullptr && expected != nullptr && raw->canCastTo == expected),
"This schema is not compatible with the requested native type.");
}
......@@ -132,7 +132,7 @@ StructSchema::Member StructSchema::getMemberByName(Text::Reader name) const {
KJ_IF_MAYBE(member, findMemberByName(name)) {
return *member;
} else {
FAIL_REQUIRE("struct has no such member", name);
KJ_FAIL_REQUIRE("struct has no such member", name);
}
}
......@@ -142,7 +142,7 @@ kj::Maybe<StructSchema::Union> StructSchema::Member::getContainingUnion() const
}
StructSchema::Union StructSchema::Member::asUnion() const {
REQUIRE(proto.getBody().which() == schema::StructNode::Member::Body::UNION_MEMBER,
KJ_REQUIRE(proto.getBody().which() == schema::StructNode::Member::Body::UNION_MEMBER,
"Tried to use non-union struct member as a union.",
parent.getProto().getDisplayName(), proto.getName());
return Union(*this);
......@@ -160,7 +160,7 @@ StructSchema::Member StructSchema::Union::getMemberByName(Text::Reader name) con
KJ_IF_MAYBE(member, findMemberByName(name)) {
return *member;
} else {
FAIL_REQUIRE("union has no such member", name);
KJ_FAIL_REQUIRE("union has no such member", name);
}
}
......@@ -178,7 +178,7 @@ EnumSchema::Enumerant EnumSchema::getEnumerantByName(Text::Reader name) const {
KJ_IF_MAYBE(enumerant, findEnumerantByName(name)) {
return *enumerant;
} else {
FAIL_REQUIRE("enum has no such enumerant", name);
KJ_FAIL_REQUIRE("enum has no such enumerant", name);
}
}
......@@ -196,7 +196,7 @@ InterfaceSchema::Method InterfaceSchema::getMethodByName(Text::Reader name) cons
KJ_IF_MAYBE(method, findMethodByName(name)) {
return *method;
} else {
FAIL_REQUIRE("interface has no such method", name);
KJ_FAIL_REQUIRE("interface has no such method", name);
}
}
......@@ -224,11 +224,11 @@ ListSchema ListSchema::of(schema::Type::Body::Which primitiveType) {
case schema::Type::Body::ENUM_TYPE:
case schema::Type::Body::INTERFACE_TYPE:
case schema::Type::Body::LIST_TYPE:
FAIL_REQUIRE("Must use one of the other ListSchema::of() overloads for complex types.");
KJ_FAIL_REQUIRE("Must use one of the other ListSchema::of() overloads for complex types.");
break;
case schema::Type::Body::OBJECT_TYPE:
FAIL_REQUIRE("List(Object) not supported.");
KJ_FAIL_REQUIRE("List(Object) not supported.");
break;
}
......@@ -267,7 +267,7 @@ ListSchema ListSchema::of(schema::Type::Reader elementType, Schema context) {
return of(of(body.getListType(), context));
case schema::Type::Body::OBJECT_TYPE:
FAIL_REQUIRE("List(Object) not supported.");
KJ_FAIL_REQUIRE("List(Object) not supported.");
return ListSchema();
}
......@@ -276,31 +276,31 @@ ListSchema ListSchema::of(schema::Type::Reader elementType, Schema context) {
}
StructSchema ListSchema::getStructElementType() const {
REQUIRE(nestingDepth == 0 && elementType == schema::Type::Body::STRUCT_TYPE,
KJ_REQUIRE(nestingDepth == 0 && elementType == schema::Type::Body::STRUCT_TYPE,
"ListSchema::getStructElementType(): The elements are not structs.");
return elementSchema.asStruct();
}
EnumSchema ListSchema::getEnumElementType() const {
REQUIRE(nestingDepth == 0 && elementType == schema::Type::Body::ENUM_TYPE,
KJ_REQUIRE(nestingDepth == 0 && elementType == schema::Type::Body::ENUM_TYPE,
"ListSchema::getEnumElementType(): The elements are not enums.");
return elementSchema.asEnum();
}
InterfaceSchema ListSchema::getInterfaceElementType() const {
REQUIRE(nestingDepth == 0 && elementType == schema::Type::Body::INTERFACE_TYPE,
KJ_REQUIRE(nestingDepth == 0 && elementType == schema::Type::Body::INTERFACE_TYPE,
"ListSchema::getInterfaceElementType(): The elements are not interfaces.");
return elementSchema.asInterface();
}
ListSchema ListSchema::getListElementType() const {
REQUIRE(nestingDepth > 0,
KJ_REQUIRE(nestingDepth > 0,
"ListSchema::getListElementType(): The elements are not lists.");
return ListSchema(elementType, nestingDepth - 1, elementSchema);
}
void ListSchema::requireUsableAs(ListSchema expected) {
REQUIRE(elementType == expected.elementType && nestingDepth == expected.nestingDepth,
KJ_REQUIRE(elementType == expected.elementType && nestingDepth == expected.nestingDepth,
"This schema is not compatible with the requested native type.");
elementSchema.requireUsableAs(expected.elementSchema.raw);
}
......
......@@ -62,7 +62,7 @@ public:
}
size_t read(void* buffer, size_t minBytes, size_t maxBytes) override {
ASSERT(maxBytes <= data.size() - readPos, "Overran end of stream.");
KJ_ASSERT(maxBytes <= data.size() - readPos, "Overran end of stream.");
size_t amount = std::min(maxBytes, std::max(minBytes, preferredReadSize));
memcpy(buffer, data.data() + readPos, amount);
readPos += amount;
......@@ -70,7 +70,7 @@ public:
}
void skip(size_t bytes) override {
ASSERT(bytes <= data.size() - readPos, "Overran end of stream.");
KJ_ASSERT(bytes <= data.size() - readPos, "Overran end of stream.");
readPos += bytes;
}
......
......@@ -39,8 +39,8 @@ size_t PackedInputStream::read(void* dst, size_t minBytes, size_t maxBytes) {
return 0;
}
DREQUIRE(minBytes % sizeof(word) == 0, "PackedInputStream reads must be word-aligned.");
DREQUIRE(maxBytes % sizeof(word) == 0, "PackedInputStream reads must be word-aligned.");
KJ_DREQUIRE(minBytes % sizeof(word) == 0, "PackedInputStream reads must be word-aligned.");
KJ_DREQUIRE(maxBytes % sizeof(word) == 0, "PackedInputStream reads must be word-aligned.");
uint8_t* __restrict__ out = reinterpret_cast<uint8_t*>(dst);
uint8_t* const outEnd = reinterpret_cast<uint8_t*>(dst) + maxBytes;
......@@ -66,7 +66,7 @@ size_t PackedInputStream::read(void* dst, size_t minBytes, size_t maxBytes) {
for (;;) {
uint8_t tag;
DASSERT((out - reinterpret_cast<uint8_t*>(dst)) % sizeof(word) == 0,
KJ_DASSERT((out - reinterpret_cast<uint8_t*>(dst)) % sizeof(word) == 0,
"Output pointer should always be aligned here.");
if (BUFFER_REMAINING < 10) {
......@@ -122,7 +122,7 @@ size_t PackedInputStream::read(void* dst, size_t minBytes, size_t maxBytes) {
}
if (tag == 0) {
DASSERT(BUFFER_REMAINING > 0, "Should always have non-empty buffer here.");
KJ_DASSERT(BUFFER_REMAINING > 0, "Should always have non-empty buffer here.");
uint runLength = *in++ * sizeof(word);
......@@ -134,7 +134,7 @@ size_t PackedInputStream::read(void* dst, size_t minBytes, size_t maxBytes) {
out += runLength;
} else if (tag == 0xffu) {
DASSERT(BUFFER_REMAINING > 0, "Should always have non-empty buffer here.");
KJ_DASSERT(BUFFER_REMAINING > 0, "Should always have non-empty buffer here.");
uint runLength = *in++ * sizeof(word);
......@@ -177,8 +177,8 @@ size_t PackedInputStream::read(void* dst, size_t minBytes, size_t maxBytes) {
}
}
FAIL_ASSERT("Can't get here.");
return 0; // GCC knows FAIL_ASSERT doesn't return, but Eclipse CDT still warns...
KJ_FAIL_ASSERT("Can't get here.");
return 0; // GCC knows KJ_FAIL_ASSERT doesn't return, but Eclipse CDT still warns...
#undef REFRESH_BUFFER
}
......@@ -190,7 +190,7 @@ void PackedInputStream::skip(size_t bytes) {
return;
}
DREQUIRE(bytes % sizeof(word) == 0, "PackedInputStream reads must be word-aligned.");
KJ_DREQUIRE(bytes % sizeof(word) == 0, "PackedInputStream reads must be word-aligned.");
kj::ArrayPtr<const byte> buffer = inner.getReadBuffer();
const uint8_t* __restrict__ in = reinterpret_cast<const uint8_t*>(buffer.begin());
......@@ -248,7 +248,7 @@ void PackedInputStream::skip(size_t bytes) {
}
if (tag == 0) {
DASSERT(BUFFER_REMAINING > 0, "Should always have non-empty buffer here.");
KJ_DASSERT(BUFFER_REMAINING > 0, "Should always have non-empty buffer here.");
uint runLength = *in++ * sizeof(word);
......@@ -260,7 +260,7 @@ void PackedInputStream::skip(size_t bytes) {
bytes -= runLength;
} else if (tag == 0xffu) {
DASSERT(BUFFER_REMAINING > 0, "Should always have non-empty buffer here.");
KJ_DASSERT(BUFFER_REMAINING > 0, "Should always have non-empty buffer here.");
uint runLength = *in++ * sizeof(word);
......@@ -298,7 +298,7 @@ void PackedInputStream::skip(size_t bytes) {
}
}
FAIL_ASSERT("Can't get here.");
KJ_FAIL_ASSERT("Can't get here.");
}
// -------------------------------------------------------------------
......
......@@ -96,7 +96,7 @@ public:
}
size_t read(void* buffer, size_t minBytes, size_t maxBytes) override {
ASSERT(maxBytes <= data.size() - readPos, "Overran end of stream.");
KJ_ASSERT(maxBytes <= data.size() - readPos, "Overran end of stream.");
size_t amount = std::min(maxBytes, std::max(minBytes, preferredReadSize));
memcpy(buffer, data.data() + readPos, amount);
readPos += amount;
......@@ -104,7 +104,7 @@ public:
}
void skip(size_t bytes) override {
ASSERT(bytes <= data.size() - readPos, "Overran end of stream.");
KJ_ASSERT(bytes <= data.size() - readPos, "Overran end of stream.");
readPos += bytes;
}
......
......@@ -40,7 +40,7 @@ public:
// implements snappy::Source ---------------------------------------
size_t Available() const override {
FAIL_ASSERT("Snappy doesn't actually call this.");
KJ_FAIL_ASSERT("Snappy doesn't actually call this.");
return 0;
}
......@@ -121,7 +121,7 @@ void SnappyInputStream::refill() {
SnappyOutputStream::SnappyOutputStream(
OutputStream& inner, kj::ArrayPtr<byte> buffer, kj::ArrayPtr<byte> compressedBuffer)
: inner(inner) {
DASSERT(SNAPPY_COMPRESSED_BUFFER_SIZE >= snappy::MaxCompressedLength(snappy::kBlockSize),
KJ_DASSERT(SNAPPY_COMPRESSED_BUFFER_SIZE >= snappy::MaxCompressedLength(snappy::kBlockSize),
"snappy::MaxCompressedLength() changed?");
if (buffer.size() < SNAPPY_BUFFER_SIZE) {
......@@ -159,7 +159,7 @@ void SnappyOutputStream::flush() {
snappy::UncheckedByteArraySink sink(reinterpret_cast<char*>(compressedBuffer.begin()));
size_t n = snappy::Compress(&source, &sink);
ASSERT(n <= compressedBuffer.size(),
KJ_ASSERT(n <= compressedBuffer.size(),
"Critical security bug: Snappy compression overran its output buffer.");
inner.write(compressedBuffer.begin(), n);
......
......@@ -104,7 +104,7 @@ public:
~TestInputStream() {}
size_t read(void* buffer, size_t minBytes, size_t maxBytes) override {
ASSERT(maxBytes <= size_t(end - pos), "Overran end of stream.");
KJ_ASSERT(maxBytes <= size_t(end - pos), "Overran end of stream.");
size_t amount = lazy ? minBytes : maxBytes;
memcpy(buffer, pos, amount);
pos += amount;
......
......@@ -88,7 +88,7 @@ kj::ArrayPtr<const word> FlatArrayMessageReader::getSegment(uint id) {
}
kj::Array<word> messageToFlatArray(kj::ArrayPtr<const kj::ArrayPtr<const word>> segments) {
REQUIRE(segments.size() > 0, "Tried to serialize uninitialized message.");
KJ_REQUIRE(segments.size() > 0, "Tried to serialize uninitialized message.");
size_t totalSize = segments.size() / 2 + 1;
......@@ -122,7 +122,7 @@ kj::Array<word> messageToFlatArray(kj::ArrayPtr<const kj::ArrayPtr<const word>>
dst += segment.size();
}
DASSERT(dst == result.end(), "Buffer overrun/underrun bug in code above.");
KJ_DASSERT(dst == result.end(), "Buffer overrun/underrun bug in code above.");
return kj::mv(result);
}
......@@ -237,7 +237,7 @@ kj::ArrayPtr<const word> InputStreamMessageReader::getSegment(uint id) {
// -------------------------------------------------------------------
void writeMessage(kj::OutputStream& output, kj::ArrayPtr<const kj::ArrayPtr<const word>> segments) {
REQUIRE(segments.size() > 0, "Tried to serialize uninitialized message.");
KJ_REQUIRE(segments.size() > 0, "Tried to serialize uninitialized message.");
internal::WireValue<uint32_t> table[(segments.size() + 2) & ~size_t(1)];
......
......@@ -33,11 +33,11 @@ namespace {
struct TestObject {
TestObject() {
index = count;
ASSERT(index != throwAt);
KJ_ASSERT(index != throwAt);
++count;
}
TestObject(const TestObject& other) {
ASSERT(other.index != throwAt);
KJ_ASSERT(other.index != throwAt);
index = -1;
copiedCount++;
}
......@@ -47,7 +47,7 @@ struct TestObject {
} else {
--count;
EXPECT_EQ(index, count);
ASSERT(count != throwAt);
KJ_ASSERT(count != throwAt);
}
}
......
......@@ -125,8 +125,8 @@ void inlineRequireFailure(
#define KJ_IREQUIRE(condition, ...) \
if (KJ_EXPECT_TRUE(condition)); else ::kj::internal::inlineRequireFailure( \
__FILE__, __LINE__, #condition, #__VA_ARGS__, ##__VA_ARGS__)
// Version of REQUIRE() which is safe to use in headers that are #included by users. Used to check
// preconditions inside inline methods. KJ_INLINE_DPRECOND is particularly useful in that
// Version of KJ_REQUIRE() which is safe to use in headers that are #included by users. Used to
// check preconditions inside inline methods. KJ_INLINE_DPRECOND is particularly useful in that
// it will be enabled depending on whether the application is compiled in debug mode rather than
// whether libkj is.
#endif
......
......@@ -217,7 +217,7 @@ void ArrayOutputStream::write(const void* src, size_t size) {
// Oh goody, the caller wrote directly into our buffer.
fillPos += size;
} else {
REQUIRE(size <= (size_t)(array.end() - fillPos),
KJ_REQUIRE(size <= (size_t)(array.end() - fillPos),
"ArrayOutputStream's backing array was not large enough for the data written.");
memcpy(fillPos, src, size);
fillPos += size;
......@@ -240,7 +240,7 @@ size_t FdInputStream::read(void* buffer, size_t minBytes, size_t maxBytes) {
byte* max = pos + maxBytes;
while (pos < min) {
ssize_t n = SYSCALL(::read(fd, pos, max - pos), fd);
ssize_t n = KJ_SYSCALL(::read(fd, pos, max - pos), fd);
VALIDATE_INPUT(n > 0, "Premature EOF") {
return minBytes;
}
......@@ -256,8 +256,8 @@ void FdOutputStream::write(const void* buffer, size_t size) {
const char* pos = reinterpret_cast<const char*>(buffer);
while (size > 0) {
ssize_t n = SYSCALL(::write(fd, pos, size), fd);
ASSERT(n > 0, "write() returned zero.");
ssize_t n = KJ_SYSCALL(::write(fd, pos, size), fd);
KJ_ASSERT(n > 0, "write() returned zero.");
pos += n;
size -= n;
}
......@@ -280,8 +280,8 @@ void FdOutputStream::write(ArrayPtr<const ArrayPtr<const byte>> pieces) {
}
while (current < iov.end()) {
ssize_t n = SYSCALL(::writev(fd, current, iov.end() - current), fd);
ASSERT(n > 0, "writev() returned zero.");
ssize_t n = KJ_SYSCALL(::writev(fd, current, iov.end() - current), fd);
KJ_ASSERT(n > 0, "writev() returned zero.");
while (static_cast<size_t>(n) >= current->iov_len) {
n -= current->iov_len;
......
......@@ -88,7 +88,7 @@ TEST(Logging, Log) {
MockExceptionCallback::ScopedRegistration reg(mockCallback);
int line;
LOG(WARNING, "Hello world!"); line = __LINE__;
KJ_LOG(WARNING, "Hello world!"); line = __LINE__;
EXPECT_EQ("log message: warning: " + fileLine(__FILE__, line) + ": Hello world!\n",
mockCallback.text);
mockCallback.text.clear();
......@@ -96,13 +96,13 @@ TEST(Logging, Log) {
int i = 123;
const char* str = "foo";
LOG(ERROR, i, str); line = __LINE__;
KJ_LOG(ERROR, i, str); line = __LINE__;
EXPECT_EQ("log message: error: " + fileLine(__FILE__, line) + ": i = 123; str = foo\n",
mockCallback.text);
mockCallback.text.clear();
ASSERT(1 == 1);
EXPECT_THROW(ASSERT(1 == 2), MockException); line = __LINE__;
KJ_ASSERT(1 == 1);
EXPECT_THROW(KJ_ASSERT(1 == 2), MockException); line = __LINE__;
EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": bug in code: expected "
"1 == 2\n", mockCallback.text);
mockCallback.text.clear();
......@@ -118,17 +118,17 @@ TEST(Logging, Log) {
EXPECT_TRUE(recovered);
mockCallback.text.clear();
EXPECT_THROW(ASSERT(1 == 2, i, "hi", str), MockException); line = __LINE__;
EXPECT_THROW(KJ_ASSERT(1 == 2, i, "hi", str), MockException); line = __LINE__;
EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": bug in code: expected "
"1 == 2; i = 123; hi; str = foo\n", mockCallback.text);
mockCallback.text.clear();
EXPECT_THROW(REQUIRE(1 == 2, i, "hi", str), MockException); line = __LINE__;
EXPECT_THROW(KJ_REQUIRE(1 == 2, i, "hi", str), MockException); line = __LINE__;
EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": precondition not met: expected "
"1 == 2; i = 123; hi; str = foo\n", mockCallback.text);
mockCallback.text.clear();
EXPECT_THROW(ASSERT(false, "foo"), MockException); line = __LINE__;
EXPECT_THROW(KJ_ASSERT(false, "foo"), MockException); line = __LINE__;
EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": bug in code: foo\n",
mockCallback.text);
mockCallback.text.clear();
......@@ -142,9 +142,9 @@ TEST(Logging, Syscall) {
int i = 123;
const char* str = "foo";
int fd = SYSCALL(dup(STDIN_FILENO));
SYSCALL(close(fd));
EXPECT_THROW(SYSCALL(close(fd), i, "bar", str), MockException); line = __LINE__;
int fd = KJ_SYSCALL(dup(STDIN_FILENO));
KJ_SYSCALL(close(fd));
EXPECT_THROW(KJ_SYSCALL(close(fd), i, "bar", str), MockException); line = __LINE__;
EXPECT_EQ("fatal exception: " + fileLine(__FILE__, line) + ": error from OS: close(fd): "
+ strerror(EBADF) + "; i = 123; bar; str = foo\n", mockCallback.text);
mockCallback.text.clear();
......@@ -163,8 +163,8 @@ TEST(Logging, Context) {
MockExceptionCallback::ScopedRegistration reg(mockCallback);
{
CONTEXT("foo"); int cline = __LINE__;
EXPECT_THROW(FAIL_ASSERT("bar"), MockException); int line = __LINE__;
KJ_CONTEXT("foo"); int cline = __LINE__;
EXPECT_THROW(KJ_FAIL_ASSERT("bar"), MockException); int line = __LINE__;
EXPECT_EQ("fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n"
+ fileLine(__FILE__, line) + ": bug in code: bar\n",
......@@ -174,8 +174,8 @@ TEST(Logging, Context) {
{
int i = 123;
const char* str = "qux";
CONTEXT("baz", i, "corge", str); int cline2 = __LINE__;
EXPECT_THROW(FAIL_ASSERT("bar"), MockException); line = __LINE__;
KJ_CONTEXT("baz", i, "corge", str); int cline2 = __LINE__;
EXPECT_THROW(KJ_FAIL_ASSERT("bar"), MockException); line = __LINE__;
EXPECT_EQ("fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n"
+ fileLine(__FILE__, cline2) + ": context: baz; i = 123; corge; str = qux\n"
......@@ -185,8 +185,8 @@ TEST(Logging, Context) {
}
{
CONTEXT("grault"); int cline2 = __LINE__;
EXPECT_THROW(FAIL_ASSERT("bar"), MockException); line = __LINE__;
KJ_CONTEXT("grault"); int cline2 = __LINE__;
EXPECT_THROW(KJ_FAIL_ASSERT("bar"), MockException); line = __LINE__;
EXPECT_EQ("fatal exception: " + fileLine(__FILE__, cline) + ": context: foo\n"
+ fileLine(__FILE__, cline2) + ": context: grault\n"
......
......@@ -24,7 +24,7 @@
// This file declares convenient macros for debug logging and error handling. The macros make
// it excessively easy to extract useful context information from code. Example:
//
// ASSERT(a == b, a, b, "a and b must be the same.");
// KJ_ASSERT(a == b, a, b, "a and b must be the same.");
//
// On failure, this will throw an exception whose description looks like:
//
......@@ -34,35 +34,35 @@
//
// The macros available are:
//
// * `LOG(severity, ...)`: Just writes a log message, to stderr by default (but you can intercept
// messages by implementing an ExceptionCallback). `severity` is `INFO`, `WARNING`, `ERROR`, or
// `FATAL`. If the severity is not higher than the global logging threshold, nothing will be
// written and in fact the log message won't even be evaluated.
// * `KJ_LOG(severity, ...)`: Just writes a log message, to stderr by default (but you can
// intercept messages by implementing an ExceptionCallback). `severity` is `INFO`, `WARNING`,
// `ERROR`, or `FATAL`. If the severity is not higher than the global logging threshold, nothing
// will be written and in fact the log message won't even be evaluated.
//
// * `ASSERT(condition, ...)`: Throws an exception if `condition` is false, or aborts if exceptions
// are disabled. This macro should be used to check for bugs in the surrounding code and its
// dependencies, but NOT to check for invalid input.
// * `KJ_ASSERT(condition, ...)`: Throws an exception if `condition` is false, or aborts if
// exceptions are disabled. This macro should be used to check for bugs in the surrounding code
// and its dependencies, but NOT to check for invalid input.
//
// * `REQUIRE(condition, ...)`: Like `ASSERT` but used to check preconditions -- e.g. to validate
// parameters passed from a caller. A failure indicates that the caller is buggy.
// * `KJ_REQUIRE(condition, ...)`: Like `KJ_ASSERT` but used to check preconditions -- e.g. to
// validate parameters passed from a caller. A failure indicates that the caller is buggy.
//
// * `RECOVERABLE_ASSERT(condition, ...) { ... }`: Like `ASSERT` except that if exceptions are
// * `RECOVERABLE_ASSERT(condition, ...) { ... }`: Like `KJ_ASSERT` except that if exceptions are
// disabled, instead of aborting, the following code block will be executed. This block should
// do whatever it can to fill in dummy values so that the code can continue executing, even if
// this means the eventual output will be garbage.
//
// * `RECOVERABLE_REQUIRE(condition, ...) { ... }`: Like `RECOVERABLE_ASSERT` and `REQUIRE`.
// * `RECOVERABLE_REQUIRE(condition, ...) { ... }`: Like `RECOVERABLE_ASSERT` and `KJ_REQUIRE`.
//
// * `VALIDATE_INPUT(condition, ...) { ... }`: Like `RECOVERABLE_PRECOND` but used to validate
// input that may have come from the user or some other untrusted source. Recoverability is
// required in this case.
//
// * `SYSCALL(code, ...)`: Executes `code` assuming it makes a system call. A negative return
// * `KJ_SYSCALL(code, ...)`: Executes `code` assuming it makes a system call. A negative return
// value is considered an error. EINTR is handled by retrying. Other errors are handled by
// throwing an exception. The macro also returns the call's result. For example, the following
// calls `open()` and includes the file name in any error message:
//
// int fd = SYSCALL(open(filename, O_RDONLY), filename);
// int fd = KJ_SYSCALL(open(filename, O_RDONLY), filename);
//
// * `RECOVERABLE_SYSCALL(code, ...) { ... }`: Like `RECOVERABLE_ASSERT` and `SYSCALL`. Note that
// unfortunately this macro cannot return a value since it implements control flow, but you can
......@@ -74,13 +74,13 @@
// fd = SYSCALL(open("/dev/null", O_RDONLY));
// }
//
// * `CONTEXT(...)`: Notes additional contextual information relevant to any exceptions thrown
// from within the current scope. That is, until control exits the block in which CONTEXT()
// * `KJ_CONTEXT(...)`: Notes additional contextual information relevant to any exceptions thrown
// from within the current scope. That is, until control exits the block in which KJ_CONTEXT()
// is used, if any exception is generated, it will contain the given information in its context
// chain. This is helpful because it can otherwise be very difficult to come up with error
// messages that make sense within low-level helper code. Note that the parameters to CONTEXT()
// are only evaluated if an exception is thrown. This means that any variables used must remain
// valid until the end of the scope.
// messages that make sense within low-level helper code. Note that the parameters to
// KJ_CONTEXT() are only evaluated if an exception is thrown. This implies that any variables
// used must remain valid until the end of the scope.
//
// Notes:
// * Do not write expressions with side-effects in the message content part of the macro, as the
......@@ -210,12 +210,12 @@ private:
ArrayPtr<const char> KJ_STRINGIFY(Log::Severity severity);
#define LOG(severity, ...) \
#define KJ_LOG(severity, ...) \
if (!::kj::Log::shouldLog(::kj::Log::Severity::severity)) {} else \
::kj::Log::log(__FILE__, __LINE__, ::kj::Log::Severity::severity, \
#__VA_ARGS__, __VA_ARGS__)
#define FAULT(nature, cond, ...) \
#define KJ_FAULT(nature, cond, ...) \
if (KJ_EXPECT_TRUE(cond)) {} else \
::kj::Log::fatalFault(__FILE__, __LINE__, \
::kj::Exception::Nature::nature, #cond, #__VA_ARGS__, ##__VA_ARGS__)
......@@ -226,19 +226,19 @@ ArrayPtr<const char> KJ_STRINGIFY(Log::Severity severity);
::kj::Exception::Nature::nature, #cond, #__VA_ARGS__, ##__VA_ARGS__), false) {} \
else
#define ASSERT(...) FAULT(LOCAL_BUG, __VA_ARGS__)
#define KJ_ASSERT(...) KJ_FAULT(LOCAL_BUG, __VA_ARGS__)
#define RECOVERABLE_ASSERT(...) RECOVERABLE_FAULT(LOCAL_BUG, __VA_ARGS__)
#define REQUIRE(...) FAULT(PRECONDITION, __VA_ARGS__)
#define KJ_REQUIRE(...) KJ_FAULT(PRECONDITION, __VA_ARGS__)
#define RECOVERABLE_REQUIRE(...) RECOVERABLE_FAULT(PRECONDITION, __VA_ARGS__)
#define VALIDATE_INPUT(...) RECOVERABLE_FAULT(INPUT, __VA_ARGS__)
#define FAIL_ASSERT(...) ASSERT(false, ##__VA_ARGS__)
#define KJ_FAIL_ASSERT(...) KJ_ASSERT(false, ##__VA_ARGS__)
#define FAIL_RECOVERABLE_ASSERT(...) RECOVERABLE_ASSERT(false, ##__VA_ARGS__)
#define FAIL_REQUIRE(...) REQUIRE(false, ##__VA_ARGS__)
#define KJ_FAIL_REQUIRE(...) KJ_REQUIRE(false, ##__VA_ARGS__)
#define FAIL_RECOVERABLE_REQUIRE(...) RECOVERABLE_REQUIRE(false, ##__VA_ARGS__)
#define FAIL_VALIDATE_INPUT(...) VALIDATE_INPUT(false, ##__VA_ARGS__)
#define SYSCALL(call, ...) \
#define KJ_SYSCALL(call, ...) \
::kj::Log::syscall( \
[&](){return (call);}, __FILE__, __LINE__, #call, #__VA_ARGS__, ##__VA_ARGS__)
......@@ -263,7 +263,7 @@ ArrayPtr<const char> KJ_STRINGIFY(Log::Severity severity);
_errorNumber, __FILE__, __LINE__, #code, #__VA_ARGS__, ##__VA_ARGS__); \
} while (false)
#define CONTEXT(...) \
#define KJ_CONTEXT(...) \
auto _kjContextFunc = [&](::kj::Exception& exception) { \
return ::kj::Log::addContextTo(exception, \
__FILE__, __LINE__, #__VA_ARGS__, ##__VA_ARGS__); \
......@@ -271,17 +271,17 @@ ArrayPtr<const char> KJ_STRINGIFY(Log::Severity severity);
::kj::Log::ContextImpl<decltype(_kjContextFunc)> _kjContext(_kjContextFunc)
#ifdef NDEBUG
#define DLOG(...) do {} while (false)
#define DASSERT(...) do {} while (false)
#define RECOVERABLE_DASSERT(...) do {} while (false)
#define DREQUIRE(...) do {} while (false)
#define RECOVERABLE_DREQUIRE(...) do {} while (false)
#define KJ_DLOG(...) do {} while (false)
#define KJ_DASSERT(...) do {} while (false)
#define KJ_RECOVERABLE_DASSERT(...) do {} while (false)
#define KJ_DREQUIRE(...) do {} while (false)
#define KJ_RECOVERABLE_DREQUIRE(...) do {} while (false)
#else
#define DLOG LOG
#define DASSERT ASSERT
#define RECOVERABLE_DASSERT RECOVERABLE_ASSERT
#define DREQUIRE REQUIRE
#define RECOVERABLE_DREQUIRE RECOVERABLE_REQUIRE
#define KJ_DLOG LOG
#define KJ_DASSERT KJ_ASSERT
#define KJ_RECOVERABLE_DASSERT RECOVERABLE_ASSERT
#define KJ_DREQUIRE KJ_REQUIRE
#define KJ_RECOVERABLE_DREQUIRE RECOVERABLE_REQUIRE
#endif
template <typename... Params>
......
......@@ -232,7 +232,7 @@ char* DoubleToBuffer(double value, char* buffer) {
// The snprintf should never overflow because the buffer is significantly
// larger than the precision we asked for.
DASSERT(snprintf_result > 0 && snprintf_result < kDoubleToBufferSize);
KJ_DASSERT(snprintf_result > 0 && snprintf_result < kDoubleToBufferSize);
// We need to make parsed_value volatile in order to force the compiler to
// write it out to the stack. Otherwise, it may keep the value in a
......@@ -246,7 +246,7 @@ char* DoubleToBuffer(double value, char* buffer) {
snprintf(buffer, kDoubleToBufferSize, "%.*g", DBL_DIG+2, value);
// Should never overflow; see above.
DASSERT(snprintf_result > 0 && snprintf_result < kDoubleToBufferSize);
KJ_DASSERT(snprintf_result > 0 && snprintf_result < kDoubleToBufferSize);
}
DelocalizeRadix(buffer);
......@@ -288,7 +288,7 @@ char* FloatToBuffer(float value, char* buffer) {
// The snprintf should never overflow because the buffer is significantly
// larger than the precision we asked for.
DASSERT(snprintf_result > 0 && snprintf_result < kFloatToBufferSize);
KJ_DASSERT(snprintf_result > 0 && snprintf_result < kFloatToBufferSize);
float parsed_value;
if (!safe_strtof(buffer, &parsed_value) || parsed_value != value) {
......@@ -296,7 +296,7 @@ char* FloatToBuffer(float value, char* buffer) {
snprintf(buffer, kFloatToBufferSize, "%.*g", FLT_DIG+2, value);
// Should never overflow; see above.
DASSERT(snprintf_result > 0 && snprintf_result < kFloatToBufferSize);
KJ_DASSERT(snprintf_result > 0 && snprintf_result < kFloatToBufferSize);
}
DelocalizeRadix(buffer);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment