Commit d067d620 authored by Kenton Varda's avatar Kenton Varda Committed by GitHub

Merge pull request #567 from ecatmur/memcpy-null-src

Memcpy null src
parents 2803eb52 886cc3c4
......@@ -371,15 +371,15 @@ struct WireHelpers {
#endif
static KJ_ALWAYS_INLINE(void zeroMemory(byte* ptr, ByteCount32 count)) {
  // Zero `count` bytes at `ptr`. Calling memset() with a null pointer is
  // undefined behavior even when the length is zero, and `ptr` may be null
  // for an empty range, so skip the call entirely when count == 0.
  if (count != 0u) memset(ptr, 0, unbound(count / BYTES));
}
static KJ_ALWAYS_INLINE(void zeroMemory(word* ptr, WordCountN<29> count)) {
  // Zero `count` words at `ptr`. Guard against count == 0: memset() on a
  // null pointer is UB even for a zero-length fill, and empty ranges may
  // carry a null pointer.
  if (count != 0u) memset(ptr, 0, unbound(count * BYTES_PER_WORD / BYTES));
}
static KJ_ALWAYS_INLINE(void zeroMemory(WirePointer* ptr, WirePointerCountN<29> count)) {
  // Zero `count` wire pointers at `ptr`. Skip the memset() when the count
  // is zero: passing a null pointer to memset() is UB regardless of length.
  if (count != 0u) memset(ptr, 0, unbound(count * BYTES_PER_POINTER / BYTES));
}
static KJ_ALWAYS_INLINE(void zeroMemory(WirePointer* ptr)) {
......@@ -388,20 +388,20 @@ struct WireHelpers {
template <typename T>
static inline void zeroMemory(kj::ArrayPtr<T> array) {
  // Zero every element of `array`. An empty ArrayPtr may have a null
  // begin(), and memset(nullptr, ...) is UB even with size 0, so guard
  // the call on a non-empty size.
  if (array.size() != 0u) memset(array.begin(), 0, array.size() * sizeof(array[0]));
}
static KJ_ALWAYS_INLINE(void copyMemory(byte* to, const byte* from, ByteCount32 count)) {
  // Copy `count` bytes from `from` to `to`. memcpy() with a null source or
  // destination is UB even for a zero-byte copy, so skip it for count == 0.
  if (count != 0u) memcpy(to, from, unbound(count / BYTES));
}
static KJ_ALWAYS_INLINE(void copyMemory(word* to, const word* from, WordCountN<29> count)) {
  // Copy `count` words. Guard against count == 0 because either pointer may
  // be null for an empty range, and memcpy(nullptr, ...) is UB.
  if (count != 0u) memcpy(to, from, unbound(count * BYTES_PER_WORD / BYTES));
}
static KJ_ALWAYS_INLINE(void copyMemory(WirePointer* to, const WirePointer* from,
                                        WirePointerCountN<29> count)) {
  // Copy `count` wire pointers. Skip the memcpy() entirely when the count is
  // zero to avoid UB from null source/destination pointers.
  if (count != 0u) memcpy(to, from, unbound(count * BYTES_PER_POINTER / BYTES));
}
template <typename T>
......@@ -412,14 +412,14 @@ struct WireHelpers {
// TODO(cleanup): Turn these into a .copyTo() method of ArrayPtr?
template <typename T>
static inline void copyMemory(T* to, kj::ArrayPtr<T> from) {
  // Copy all elements of `from` into `to`. An empty ArrayPtr may have a null
  // begin(), and memcpy() from null is UB even with size 0, so guard on a
  // non-empty size.
  if (from.size() != 0u) memcpy(to, from.begin(), from.size() * sizeof(from[0]));
}
template <typename T>
static inline void copyMemory(T* to, kj::ArrayPtr<const T> from) {
  // Const-element overload: copy all elements of `from` into `to`, skipping
  // the memcpy() for an empty range (null begin() would be UB).
  if (from.size() != 0u) memcpy(to, from.begin(), from.size() * sizeof(from[0]));
}
static KJ_ALWAYS_INLINE(void copyMemory(char* to, kj::StringPtr from)) {
  // Copy the characters of `from` (excluding any NUL terminator handling
  // here) into `to`. Guard the memcpy() so an empty string — whose begin()
  // may be null — does not invoke UB.
  if (from.size() != 0u) memcpy(to, from.begin(), from.size() * sizeof(from[0]));
}
static KJ_ALWAYS_INLINE(bool boundsCheck(
......
......@@ -94,7 +94,9 @@ void expectPacksTo(kj::ArrayPtr<const byte> unpackedUnaligned, kj::ArrayPtr<cons
// Make a guaranteed-to-be-aligned copy of the unpacked buffer.
kj::Array<word> unpackedWords = kj::heapArray<word>(unpackedSizeInWords);
memcpy(unpackedWords.begin(), unpackedUnaligned.begin(), unpackedUnaligned.size());
if (unpackedUnaligned.size() != 0u) {
memcpy(unpackedWords.begin(), unpackedUnaligned.begin(), unpackedUnaligned.size());
}
kj::ArrayPtr<const byte> unpacked = unpackedWords.asBytes();
// -----------------------------------------------------------------
......
......@@ -682,7 +682,9 @@ struct CopyConstructArray_;
template <typename T, bool move>
struct CopyConstructArray_<T, T*, move, true> {
  // Specialization used when T can be byte-copied from a mutable T* range.
  static inline T* apply(T* __restrict__ pos, T* start, T* end) {
    // memcpy() with null src/dst is UB even for a zero-byte copy; an empty
    // range (start == end) may carry null pointers, so skip the copy then.
    if (end != start) {
      memcpy(pos, start, reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start));
    }
    // end - start is zero for the empty case, so this is always valid.
    return pos + (end - start);
  }
};
......@@ -690,7 +692,9 @@ struct CopyConstructArray_<T, T*, move, true> {
template <typename T>
struct CopyConstructArray_<T, const T*, false, true> {
  // Specialization used when T can be byte-copied from a const T* range.
  static inline T* apply(T* __restrict__ pos, const T* start, const T* end) {
    // Guard against the empty range: memcpy() from a null pointer is UB
    // even when the byte count is zero.
    if (end != start) {
      memcpy(pos, start, reinterpret_cast<const byte*>(end) - reinterpret_cast<const byte*>(start));
    }
    return pos + (end - start);
  }
};
......
......@@ -219,6 +219,7 @@ namespace _ { // private
struct IdentifierToString {
inline String operator()(char first, const Array<char>& rest) const {
if (rest.size() == 0) return heapString(&first, 1);
String result = heapString(rest.size() + 1);
result[0] = first;
memcpy(result.begin() + 1, rest.begin(), rest.size());
......
......@@ -54,6 +54,12 @@ TEST(String, Str) {
EXPECT_EQ("foo", str(mv(f)));
}
TEST(String, Nullptr) {
// A default (null) String must behave like the empty string: it compares
// equal to "", reports size 0, and exposes a NUL terminator at index 0
// when viewed through StringPtr.
EXPECT_EQ(String(nullptr), "");
EXPECT_EQ(StringPtr(String(nullptr)).size(), 0u);
EXPECT_EQ(StringPtr(String(nullptr))[0], '\0');
}
TEST(String, StartsEndsWith) {
EXPECT_TRUE(StringPtr("foobar").startsWith("foo"));
EXPECT_FALSE(StringPtr("foobar").startsWith("bar"));
......
......@@ -111,7 +111,9 @@ String heapString(size_t size) {
String heapString(const char* value, size_t size) {
  // Heap-allocate a String holding a copy of `value[0..size)`.
  // Allocate size + 1 so the result is always NUL-terminated.
  char* buffer = _::HeapArrayDisposer::allocate<char>(size + 1);
  // `value` may be null when size == 0, and memcpy() from a null pointer is
  // UB even for a zero-byte copy, so guard the copy.
  if (size != 0u) {
    memcpy(buffer, value, size);
  }
  buffer[size] = '\0';
  return String(buffer, size, _::HeapArrayDisposer::instance);
}
......
......@@ -445,7 +445,7 @@ inline String Stringifier::operator*(const Array<T>& arr) const {
// =======================================================================================
// Inline implementation details.
// Use cStr() rather than begin(): for a null String, begin() is null, while
// cStr() returns a static "" — so the StringPtr always points at a valid
// NUL-terminated buffer and size() + 1 safely covers the terminator.
inline StringPtr::StringPtr(const String& value): content(value.cStr(), value.size() + 1) {}
inline constexpr StringPtr::operator ArrayPtr<const char>() const {
return ArrayPtr<const char>(content.begin(), content.size() - 1);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment