[K/N][Runtime] Support equality and hashcode for stacktraces

This commit is contained in:
Ilya Matveev
2022-01-17 19:39:12 +07:00
committed by Space
parent 1cb509d529
commit c35b8342d8
5 changed files with 302 additions and 11 deletions
@@ -73,19 +73,47 @@ public:
return std_support::span<void* const>(buffer_.data(), size());
}
// Two stacktraces are equal iff they hold the same frame addresses in the same order.
bool operator==(const StackTrace& other) const noexcept {
    if (size() != other.size()) return false;
    return std::equal(begin(), end(), other.begin());
}
// Negation of operator==, provided so traces work with both comparison forms.
bool operator!=(const StackTrace& other) const noexcept { return !operator==(other); }
// Maximal stacktrace depth that can be collected due to implementation limitations.
// Note that this limitation doesn't take into account the skipFrames parameter.
// I.e. real size of a returned stacktrace will be limited by (maxDepth - skipFrames).
// Bounded both by the platform backtrace limit and by this instance's fixed Capacity.
static constexpr size_t maxDepth =
        std::min(internal::GetMaxStackTraceDepth<internal::StackTraceCapacityKind::kFixed>(), Capacity);
// Collects the current stacktrace, skipping the topmost `skipFrames` frames and
// keeping at most `depthLimit` frames. `skipFrames + 1` additionally hides this
// call itself from the result.
// Marked noexcept for consistency with the `current(size_t)` overload below:
// both call the same internal::GetCurrentStackTrace writing into the
// preallocated fixed-size buffer, with no allocation.
NO_INLINE static StackTrace current(size_t skipFrames, size_t depthLimit) noexcept {
    StackTrace result;
    auto fullTraceSize = internal::GetCurrentStackTrace(
            skipFrames + 1, std_support::span<void*>(result.buffer_.data(), result.buffer_.size()));
    // Report no more than depthLimit frames even if more were captured.
    result.size_ = std::min(fullTraceSize, depthLimit);
    return result;
}
// Collects the current stacktrace, skipping the topmost `skipFrames` frames.
// `skipFrames + 1` additionally hides this call itself from the result.
NO_INLINE static StackTrace current(size_t skipFrames = 0) noexcept {
    // Avoid delegating to current(skipFrames, depth)
    // to have the same number of "service" frames for both overloads.
    StackTrace result;
    result.size_ = internal::GetCurrentStackTrace(
            skipFrames + 1, std_support::span<void*>(result.buffer_.data(), result.buffer_.size()));
    return result;
}
// Test-only backdoor: builds a stacktrace from an explicit list of fake frame
// addresses, truncating to the fixed buffer capacity if the list is longer.
struct TestSupport : private Pinned {
    static StackTrace constructFrom(std::initializer_list<void*> frames) {
        StackTrace trace;
        const size_t count = std::min(frames.size(), trace.buffer_.size());
        std::copy_n(frames.begin(), count, trace.buffer_.begin());
        trace.size_ = count;
        return trace;
    }
};
private:
    size_t size_; // Number of valid frames currently stored in buffer_.
    std::array<void*, Capacity> buffer_; // Fixed-size frame storage; only the first size_ entries are meaningful.
@@ -123,18 +151,44 @@ public:
return std_support::span<void* const>(buffer_.data(), size());
}
// Equality is element-wise over the collected frame addresses.
bool operator==(const StackTrace& other) const noexcept {
    return size() == other.size() && std::equal(begin(), end(), other.begin());
}
// Complementary inequality in terms of operator==.
bool operator!=(const StackTrace& other) const noexcept {
    return !operator==(other);
}
// Maximal stacktrace depth that can be collected due to implementation limitations.
// Note that this limitation doesn't take into account the skipFrames parameter.
// I.e. real size of a returned stacktrace will be limited by (maxDepth - skipFrames).
static constexpr size_t maxDepth = internal::GetMaxStackTraceDepth<internal::StackTraceCapacityKind::kDynamic>();
NO_INLINE static StackTrace current(size_t skipFrames = 0) {
StackTrace result;
result.buffer_ = internal::GetCurrentStackTrace(skipFrames + 1);
return result;
// Collects the current stacktrace, skipping the topmost `skipFrames` frames and
// keeping at most `depthLimit` frames. `skipFrames + 1` additionally hides this
// call itself from the result. Not noexcept: the backing vector may allocate.
NO_INLINE static StackTrace current(size_t skipFrames, size_t depthLimit) {
    auto traceElements = internal::GetCurrentStackTrace(skipFrames + 1);
    // Drop any frames beyond the requested depth.
    if (traceElements.size() > depthLimit) {
        traceElements.resize(depthLimit);
    }
    return StackTrace(std::move(traceElements));
}
// Collects the current stacktrace with no depth limit, skipping the topmost
// `skipFrames` frames. `skipFrames + 1` additionally hides this call itself.
NO_INLINE static StackTrace current(size_t skipFrames = 0) {
    // Avoid delegating to current(skipFrames, depth)
    // to have the same number of "service" frames for both overloads.
    auto traceElements = internal::GetCurrentStackTrace(skipFrames + 1);
    return StackTrace(std::move(traceElements));
}
// Test-only backdoor: builds a stacktrace from an explicit list of fake frame
// addresses via the private vector-adopting constructor.
struct TestSupport : private Pinned {
    static StackTrace constructFrom(std::initializer_list<void*> frames) {
        return StackTrace(KStdVector<void*>(frames));
    }
};
private:
    // Adopts an already-collected frame buffer. std::move is required here:
    // a named rvalue-reference parameter is an lvalue, so `buffer_(buffer)`
    // would COPY the vector — defeating the && parameter and risking an
    // allocation (which could throw) inside a noexcept constructor.
    explicit StackTrace(KStdVector<void*>&& buffer) noexcept : buffer_(std::move(buffer)) {}

    KStdVector<void*> buffer_; // Dynamically-sized frame storage.
};
@@ -148,4 +202,16 @@ void PrintStackTraceStderr();
} // namespace kotlin
// Hash support so stacktraces can be used as keys of unordered containers.
// Folds every frame address into the accumulator in order, so traces that are
// equal per operator== produce the same hash.
template <size_t Capacity>
struct std::hash<kotlin::StackTrace<Capacity>> {
    // Takes the trace by const reference: the original by-value parameter copied
    // the whole trace (including the heap buffer of the dynamic variant) on
    // every hash lookup. Pure pointer-arithmetic hashing cannot throw.
    size_t operator()(const kotlin::StackTrace<Capacity>& value) const noexcept {
        size_t result = 0;
        std::hash<void*> hasher;
        for (void* p : value) {
            result += kotlin::CombineHash(result, hasher(p));
        }
        return result;
    }
};
#endif // RUNTIME_STACK_TRACE_H
@@ -18,22 +18,24 @@
using namespace kotlin;
using testing::Not;
namespace {
// Disable optimizations for these functions to avoid inlining and tail recursion optimization.
template <size_t Capacity = kDynamicCapacity>
OPTNONE StackTrace<Capacity> GetStackTrace1(size_t skipFrames = 0) {
return StackTrace<Capacity>::current(skipFrames);
OPTNONE StackTrace<Capacity> GetStackTrace1(size_t skipFrames = 0, size_t depth = StackTrace<Capacity>::maxDepth) {
return StackTrace<Capacity>::current(skipFrames, depth);
}
template <size_t Capacity = kDynamicCapacity>
OPTNONE StackTrace<Capacity> GetStackTrace2(size_t skipFrames = 0) {
return GetStackTrace1<Capacity>(skipFrames);
OPTNONE StackTrace<Capacity> GetStackTrace2(size_t skipFrames = 0, size_t depth = StackTrace<Capacity>::maxDepth) {
return GetStackTrace1<Capacity>(skipFrames, depth);
}
template <size_t Capacity = kDynamicCapacity>
OPTNONE StackTrace<Capacity> GetStackTrace3(size_t skipFrames = 0) {
return GetStackTrace2<Capacity>(skipFrames);
OPTNONE StackTrace<Capacity> GetStackTrace3(size_t skipFrames = 0, size_t depth = StackTrace<Capacity>::maxDepth) {
return GetStackTrace2<Capacity>(skipFrames, depth);
}
template <size_t Capacity = kDynamicCapacity>
@@ -69,6 +71,20 @@ TEST(StackTraceTest, StackTraceWithSkip) {
EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace3"));
}
// depthLimit must truncate the trace: with depth = 2 only the two frames
// closest to the collection point (GetStackTrace1, GetStackTrace2) survive.
TEST(StackTraceTest, StackTraceWithLimitedDepth) {
    auto stackTrace = GetStackTrace3(/* skipFrames = */ 0, /* depth = */ 2);
    auto symbolicStackTrace = GetStackTraceStrings(stackTrace.data());
    ASSERT_EQ(symbolicStackTrace.size(), 2ul);
    EXPECT_THAT(symbolicStackTrace[0], testing::HasSubstr("GetStackTrace1"));
    EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace2"));

    // Skipping one frame shifts the 2-frame window down the call chain.
    stackTrace = GetStackTrace3(/* skipFrames = */ 1, /* depth = */ 2);
    symbolicStackTrace = GetStackTraceStrings(stackTrace.data());
    ASSERT_EQ(symbolicStackTrace.size(), 2ul);
    EXPECT_THAT(symbolicStackTrace[0], testing::HasSubstr("GetStackTrace2"));
    EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace3"));
}
TEST(StackTraceTest, StackAllocatedTrace) {
auto stackTrace = GetStackTrace3<2>();
auto symbolicStackTrace = GetStackTraceStrings(stackTrace.data());
@@ -86,6 +102,20 @@ TEST(StackTraceTest, StackAllocatedTraceWithSkip) {
EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace3"));
}
// Depth limiting also works for the fixed-capacity (stack-allocated) variant.
TEST(StackTraceTest, StackAllocatedTraceWithLimitedDepth) {
    auto stackTrace = GetStackTrace3<10>(/* skipFrames = */ 0, /* depth = */ 2);
    auto symbolicStackTrace = GetStackTraceStrings(stackTrace.data());
    ASSERT_EQ(symbolicStackTrace.size(), 2ul);
    EXPECT_THAT(symbolicStackTrace[0], testing::HasSubstr("GetStackTrace1"));
    EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace2"));

    // Skipping one frame shifts the 2-frame window down the call chain.
    stackTrace = GetStackTrace3<10>(/* skipFrames = */ 1, /* depth = */ 2);
    symbolicStackTrace = GetStackTraceStrings(stackTrace.data());
    ASSERT_EQ(symbolicStackTrace.size(), 2ul);
    EXPECT_THAT(symbolicStackTrace[0], testing::HasSubstr("GetStackTrace2"));
    EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace3"));
}
TEST(StackTraceTest, EmptyStackTrace) {
constexpr size_t kSkip = 1000000;
auto stackTrace = GetStackTrace1(kSkip);
@@ -237,6 +267,122 @@ TEST(StackTraceTest, StackAllocatedIndexedAccessAndIteration) {
EXPECT_EQ(stackTrace.size(), 2ul);
}
// Asserts that both comparison operators agree the two traces are equal.
#define EXPECT_TRACES_EQ(trace1, trace2) do { \
    EXPECT_TRUE((trace1) == (trace2)); \
    EXPECT_FALSE((trace1) != (trace2)); \
} while(false)

// Asserts that both comparison operators agree the two traces differ.
#define EXPECT_TRACES_NE(trace1, trace2) do { \
    EXPECT_FALSE((trace1) == (trace2)); \
    EXPECT_TRUE((trace1) != (trace2)); \
} while(false)
// operator==/!= and std::hash must agree for the dynamic StackTrace<>:
// equal traces hash identically; distinct traces compare unequal.
TEST(StackTraceTest, EqualsAndHash) {
    std::hash<StackTrace<>> hasher;

    // Empty traces are equal and hash identically.
    StackTrace<> empty1, empty2;
    EXPECT_TRACES_EQ(empty1, empty2);
    EXPECT_EQ(hasher(empty1), hasher(empty2));

    // Two captures from the same call site with the same depth limit are equal:
    // depth = 2 truncates the differing TEST-body frames away.
    auto trace1 = GetStackTrace2(/* skipFrames = */ 0, /* depth = */ 2);
    EXPECT_TRACES_NE(trace1, empty2);
    auto trace2 = GetStackTrace2(/* skipFrames = */ 0, /* depth = */ 2);
    EXPECT_TRACES_EQ(trace1, trace2);
    EXPECT_EQ(hasher(trace1), hasher(trace2));

    // A different skip count or a different call chain yields a different trace.
    auto traceWithSkip = GetStackTrace2(/* skipFrames = */ 1, /* depth = */ 2);
    EXPECT_TRACES_NE(trace1, traceWithSkip);
    auto anotherTrace = GetStackTrace3();
    EXPECT_TRACES_NE(trace1, anotherTrace);

    // Synthetic traces with different frame values are expected to hash
    // differently (not guaranteed in general, but holds for these constants).
    trace1 = StackTrace<>::TestSupport::constructFrom(
            { reinterpret_cast<void*>(42), reinterpret_cast<void*>(43) });
    trace2 = StackTrace<>::TestSupport::constructFrom(
            { reinterpret_cast<void*>(44), reinterpret_cast<void*>(45) });
    EXPECT_NE(hasher(trace1), hasher(trace2));
}
// Same equals/hash contract as EqualsAndHash, but for the fixed-capacity variant.
TEST(StackTraceTest, StackAllocatedEqualsAndHash) {
    constexpr size_t capacity = 10;
    std::hash<StackTrace<capacity>> hasher;

    // Empty traces are equal and hash identically.
    StackTrace<capacity> empty1, empty2;
    EXPECT_TRACES_EQ(empty1, empty2);
    EXPECT_EQ(hasher(empty1), hasher(empty2));

    // Two captures from the same call site with the same depth limit are equal.
    // (Original had stray trailing `;;` on these declarations.)
    auto trace1 = GetStackTrace2<capacity>(/* skipFrames = */ 0, /* depth = */ 2);
    EXPECT_TRACES_NE(trace1, empty2);
    auto trace2 = GetStackTrace2<capacity>(/* skipFrames = */ 0, /* depth = */ 2);
    EXPECT_TRACES_EQ(trace1, trace2);
    EXPECT_EQ(hasher(trace1), hasher(trace2));

    // A different skip count or a different call chain yields a different trace.
    auto traceWithSkip = GetStackTrace2<capacity>(/* skipFrames = */ 1, /* depth = */ 2);
    EXPECT_TRACES_NE(trace1, traceWithSkip);
    auto anotherTrace = GetStackTrace3<capacity>();
    EXPECT_TRACES_NE(trace1, anotherTrace);

    // Synthetic traces with different frame values should hash differently.
    trace1 = StackTrace<capacity>::TestSupport::constructFrom(
            { reinterpret_cast<void*>(42), reinterpret_cast<void*>(43) });
    trace2 = StackTrace<capacity>::TestSupport::constructFrom(
            { reinterpret_cast<void*>(44), reinterpret_cast<void*>(45) });
    EXPECT_NE(hasher(trace1), hasher(trace2));
}
// Stacktraces can serve as keys of a hash set (requires operator== + std::hash).
// Uses the default dynamically-allocated StackTrace<> to match this file's
// naming convention: only tests named "StackAllocated..." use a fixed capacity.
// The original had the fixed-capacity variant here, swapped with its sibling.
TEST(StackTraceTest, StoreInHashSet) {
    KStdUnorderedSet<StackTrace<>> set;

    StackTrace<> empty;
    StackTrace<> trace1 = GetStackTrace1();
    StackTrace<> trace2 = GetStackTrace2();

    // Nothing is findable before insertion.
    EXPECT_THAT(set.find(empty), set.end());
    EXPECT_THAT(set.find(trace1), set.end());
    EXPECT_THAT(set.find(trace2), set.end());

    // Each insertion makes exactly that trace findable, and no other.
    set.insert(empty);
    EXPECT_THAT(set.find(empty), Not(set.end()));
    EXPECT_THAT(set.find(trace1), set.end());
    EXPECT_THAT(set.find(trace2), set.end());

    set.insert(trace1);
    EXPECT_THAT(set.find(empty), Not(set.end()));
    EXPECT_THAT(set.find(trace1), Not(set.end()));
    EXPECT_THAT(set.find(trace2), set.end());

    set.insert(trace2);
    EXPECT_THAT(set.find(empty), Not(set.end()));
    EXPECT_THAT(set.find(trace1), Not(set.end()));
    EXPECT_THAT(set.find(trace2), Not(set.end()));
}
// Hash-set storage for the fixed-capacity (stack-allocated) variant.
// Per this file's convention a "StackAllocated..." test must use a fixed
// Capacity; the original used the dynamic StackTrace<> here, swapped with
// its sibling StoreInHashSet test.
TEST(StackTraceTest, StackAllocatedStoreInHashSet) {
    constexpr size_t capacity = 10;
    KStdUnorderedSet<StackTrace<capacity>> set;

    StackTrace<capacity> empty;
    StackTrace<capacity> trace1 = GetStackTrace1<capacity>();
    StackTrace<capacity> trace2 = GetStackTrace2<capacity>();

    // Nothing is findable before insertion.
    EXPECT_THAT(set.find(empty), set.end());
    EXPECT_THAT(set.find(trace1), set.end());
    EXPECT_THAT(set.find(trace2), set.end());

    // Each insertion makes exactly that trace findable, and no other.
    set.insert(empty);
    EXPECT_THAT(set.find(empty), Not(set.end()));
    EXPECT_THAT(set.find(trace1), set.end());
    EXPECT_THAT(set.find(trace2), set.end());

    set.insert(trace1);
    EXPECT_THAT(set.find(empty), Not(set.end()));
    EXPECT_THAT(set.find(trace1), Not(set.end()));
    EXPECT_THAT(set.find(trace2), set.end());

    set.insert(trace2);
    EXPECT_THAT(set.find(empty), Not(set.end()));
    EXPECT_THAT(set.find(trace1), Not(set.end()));
    EXPECT_THAT(set.find(trace2), Not(set.end()));
}
TEST(StackTraceDeathTest, PrintStackTrace) {
EXPECT_DEATH(
{ AbortWithStackTrace(0); },
@@ -0,0 +1,71 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#include "Utils.hpp"
#include <climits>
#include <cstdint>
namespace {

// Hash combine functions derived from boost ones.
// Copyright 2005-2014 Daniel James.

// Rotates a 32-bit value left by `r` bits. Only meaningful for 32-bit operands;
// used by the 32-bit combine below with fixed shifts (15 and 13).
template <typename X, typename R>
constexpr auto rotl32(X x, R r) noexcept { return (x << r) | (x >> (32 - r)); }

// Mixes `value` into `seed` and returns the new seed, dispatched on the bit
// width of size_t. The generic fallback is the classic boost hash_combine
// golden-ratio formula. (Renamed from the original's typo "HashCompineImpl".)
template <size_t Bits>
struct HashCombineImpl {
    template <typename SizeT>
    constexpr static SizeT fn(SizeT seed, SizeT value) {
        seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
        return seed;
    }
};

// 32-bit variant: one MurmurHash3-style 32-bit mixing round.
template <>
struct HashCombineImpl<32> {
    constexpr static uint32_t fn(uint32_t h1, uint32_t k1) {
        const uint32_t c1 = 0xcc9e2d51;
        const uint32_t c2 = 0x1b873593;

        k1 *= c1;
        k1 = rotl32(k1, 15);
        k1 *= c2;

        h1 ^= k1;
        h1 = rotl32(h1, 13);
        h1 = h1 * 5 + 0xe6546b64;

        return h1;
    }
};

// 64-bit variant: one MurmurHash2-style 64-bit mixing round.
template <>
struct HashCombineImpl<64> {
    constexpr static uint64_t fn(uint64_t h, uint64_t k) {
        const uint64_t m = (uint64_t(0xc6a4a793) << 32) + 0x5bd1e995;
        const int r = 47;

        k *= m;
        k ^= k >> r;
        k *= m;

        h ^= k;
        h *= m;

        // Completely arbitrary number, to prevent 0's
        // from hashing to 0.
        h += 0xe6546b64;
        return h;
    }
};

} // namespace

namespace kotlin {

// Folds `value` into `seed` and returns the combined hash (declared in Utils.hpp).
size_t CombineHash(size_t seed, size_t value) {
    return HashCombineImpl<sizeof(std::size_t) * CHAR_BIT>::fn(seed, value);
}

} // namespace kotlin
@@ -6,6 +6,8 @@
#ifndef RUNTIME_UTILS_H
#define RUNTIME_UTILS_H
#include <cstddef>
#include <type_traits>
namespace kotlin {
@@ -76,6 +78,8 @@ private:
T2 oldValue_;
};
// Mixes `value` into `seed` and returns the combined hash (boost-derived;
// implementation in Utils.cpp). Used e.g. by std::hash<kotlin::StackTrace<>>.
size_t CombineHash(size_t seed, size_t value);
} // namespace kotlin
#endif // RUNTIME_UTILS_H
+4
View File
@@ -129,6 +129,10 @@ the Kotlin IntelliJ IDEA plugin:
- License: SUN ([license/third_party/sun_license.txt][sun])
- Origin: Copyright (C) 1993 by Sun Microsystems, Inc.
- Path: kotlin-native/runtime/src/main/cpp/Utils.cpp
- License: Boost Software License 1.0 ([license/third_party/boost_LICENSE.txt][boost])
- Origin: Derived from boost hash functions, Copyright 2005-2014 Daniel James
## Kotlin Test Data
The following source code is used for testing the Kotlin compiler and/or plugin and is not incorporated into