diff --git a/kotlin-native/runtime/src/main/cpp/StackTrace.hpp b/kotlin-native/runtime/src/main/cpp/StackTrace.hpp index 380cf699ec8..c8636031816 100644 --- a/kotlin-native/runtime/src/main/cpp/StackTrace.hpp +++ b/kotlin-native/runtime/src/main/cpp/StackTrace.hpp @@ -73,19 +73,47 @@ public: return std_support::span(buffer_.data(), size()); } + bool operator==(const StackTrace& other) const noexcept { + return std::equal(begin(), end(), other.begin(), other.end()); + } + + bool operator!=(const StackTrace& other) const noexcept { + return !(*this == other); + } + // Maximal stacktrace depth that can be collected due to implementation limitations. // Note that this limitation doesn't take into account the skipFrames parameter. // I.e. real size of a returned stacktrace will be limited by (maxDepth - skipFrames). static constexpr size_t maxDepth = std::min(internal::GetMaxStackTraceDepth(), Capacity); + NO_INLINE static StackTrace current(size_t skipFrames, size_t depthLimit) { + StackTrace result; + auto fullTraceSize = internal::GetCurrentStackTrace( + skipFrames + 1, std_support::span(result.buffer_.data(), result.buffer_.size())); + result.size_ = std::min(fullTraceSize, depthLimit); + return result; + } + NO_INLINE static StackTrace current(size_t skipFrames = 0) noexcept { + // Avoid delegating to current(skipFrames, depth) + // to have the same number of "service" frames for both overloads. 
+        StackTrace result;
+        result.size_ = internal::GetCurrentStackTrace(
+                skipFrames + 1, std_support::span(result.buffer_.data(), result.buffer_.size()));
+        return result;
+    }
+
+    struct TestSupport : private Pinned {
+        static StackTrace constructFrom(std::initializer_list<void*> values) {
+            StackTrace result;
+            size_t elementsCount = std::min(values.size(), result.buffer_.size());
+            std::copy_n(values.begin(), elementsCount, result.buffer_.begin());
+            result.size_ = elementsCount;
+            return result;
+        }
+    };
+
 private:
     size_t size_;
     std::array<void*, Capacity> buffer_;
@@ -123,18 +151,44 @@ public:
         return std_support::span(buffer_.data(), size());
     }
 
+    bool operator==(const StackTrace& other) const noexcept {
+        return std::equal(begin(), end(), other.begin(), other.end());
+    }
+
+    bool operator!=(const StackTrace& other) const noexcept {
+        return !(*this == other);
+    }
+
     // Maximal stacktrace depth that can be collected due to implementation limitations.
     // Note that this limitation doesn't take into account the skipFrames parameter.
     // I.e. real size of a returned stacktrace will be limited by (maxDepth - skipFrames).
     static constexpr size_t maxDepth = internal::GetMaxStackTraceDepth();
 
-    NO_INLINE static StackTrace current(size_t skipFrames = 0) {
-        StackTrace result;
-        result.buffer_ = internal::GetCurrentStackTrace(skipFrames + 1);
-        return result;
+    NO_INLINE static StackTrace current(size_t skipFrames, size_t depthLimit) {
+        auto traceElements = internal::GetCurrentStackTrace(skipFrames + 1);
+        if (traceElements.size() > depthLimit) {
+            traceElements.resize(depthLimit);
+        }
+        return StackTrace(std::move(traceElements));
     }
 
+    NO_INLINE static StackTrace current(size_t skipFrames = 0) {
+        // Avoid delegating to current(skipFrames, depth)
+        // to have the same number of "service" frames for both overloads.
+        auto traceElements = internal::GetCurrentStackTrace(skipFrames + 1);
+        return StackTrace(std::move(traceElements));
+    }
+
+    struct TestSupport : private Pinned {
+        static StackTrace constructFrom(std::initializer_list<void*> values) {
+            KStdVector<void*> traceElements(values);
+            return StackTrace(std::move(traceElements));
+        }
+    };
+
 private:
+    explicit StackTrace(KStdVector<void*>&& buffer) noexcept : buffer_(std::move(buffer)) {}
+
     KStdVector<void*> buffer_;
 };
@@ -148,4 +202,16 @@ void PrintStackTraceStderr();
 
 } // namespace kotlin
 
+template <size_t Capacity>
+struct std::hash<kotlin::StackTrace<Capacity>> {
+    size_t operator()(const kotlin::StackTrace<Capacity>& value) const {
+        size_t result = 0;
+        std::hash<void*> hasher;
+        for (void* p : value) {
+            result = kotlin::CombineHash(result, hasher(p));
+        }
+        return result;
+    }
+};
+
 #endif // RUNTIME_STACK_TRACE_H
\ No newline at end of file
diff --git a/kotlin-native/runtime/src/main/cpp/StackTraceTest.cpp b/kotlin-native/runtime/src/main/cpp/StackTraceTest.cpp
index cafcbc5538d..84b8e0255fd 100644
--- a/kotlin-native/runtime/src/main/cpp/StackTraceTest.cpp
+++ b/kotlin-native/runtime/src/main/cpp/StackTraceTest.cpp
@@ -18,22 +18,24 @@
 using namespace kotlin;
 
+using testing::Not;
+
 namespace {
 
 // Disable optimizations for these functions to avoid inlining and tail recursion optimization.
template -OPTNONE StackTrace GetStackTrace1(size_t skipFrames = 0) { - return StackTrace::current(skipFrames); +OPTNONE StackTrace GetStackTrace1(size_t skipFrames = 0, size_t depth = StackTrace::maxDepth) { + return StackTrace::current(skipFrames, depth); } template -OPTNONE StackTrace GetStackTrace2(size_t skipFrames = 0) { - return GetStackTrace1(skipFrames); +OPTNONE StackTrace GetStackTrace2(size_t skipFrames = 0, size_t depth = StackTrace::maxDepth) { + return GetStackTrace1(skipFrames, depth); } template -OPTNONE StackTrace GetStackTrace3(size_t skipFrames = 0) { - return GetStackTrace2(skipFrames); +OPTNONE StackTrace GetStackTrace3(size_t skipFrames = 0, size_t depth = StackTrace::maxDepth) { + return GetStackTrace2(skipFrames, depth); } template @@ -69,6 +71,20 @@ TEST(StackTraceTest, StackTraceWithSkip) { EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace3")); } +TEST(StackTraceTest, StackTraceWithLimitedDepth) { + auto stackTrace = GetStackTrace3(/* skipFrames = */ 0, /* depth = */ 2); + auto symbolicStackTrace = GetStackTraceStrings(stackTrace.data()); + ASSERT_EQ(symbolicStackTrace.size(), 2ul); + EXPECT_THAT(symbolicStackTrace[0], testing::HasSubstr("GetStackTrace1")); + EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace2")); + + stackTrace = GetStackTrace3(/* skipFrames = */ 1, /* depth = */ 2); + symbolicStackTrace = GetStackTraceStrings(stackTrace.data()); + ASSERT_EQ(symbolicStackTrace.size(), 2ul); + EXPECT_THAT(symbolicStackTrace[0], testing::HasSubstr("GetStackTrace2")); + EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace3")); +} + TEST(StackTraceTest, StackAllocatedTrace) { auto stackTrace = GetStackTrace3<2>(); auto symbolicStackTrace = GetStackTraceStrings(stackTrace.data()); @@ -86,6 +102,20 @@ TEST(StackTraceTest, StackAllocatedTraceWithSkip) { EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace3")); } +TEST(StackTraceTest, StackAllocatedTraceWithLimitedDepth) { + 
auto stackTrace = GetStackTrace3<10>(/* skipFrames = */ 0, /* depth = */ 2); + auto symbolicStackTrace = GetStackTraceStrings(stackTrace.data()); + ASSERT_EQ(symbolicStackTrace.size(), 2ul); + EXPECT_THAT(symbolicStackTrace[0], testing::HasSubstr("GetStackTrace1")); + EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace2")); + + stackTrace = GetStackTrace3<10>(/* skipFrames = */ 1, /* depth = */ 2); + symbolicStackTrace = GetStackTraceStrings(stackTrace.data()); + ASSERT_EQ(symbolicStackTrace.size(), 2ul); + EXPECT_THAT(symbolicStackTrace[0], testing::HasSubstr("GetStackTrace2")); + EXPECT_THAT(symbolicStackTrace[1], testing::HasSubstr("GetStackTrace3")); +} + TEST(StackTraceTest, EmptyStackTrace) { constexpr size_t kSkip = 1000000; auto stackTrace = GetStackTrace1(kSkip); @@ -237,6 +267,122 @@ TEST(StackTraceTest, StackAllocatedIndexedAccessAndIteration) { EXPECT_EQ(stackTrace.size(), 2ul); } +#define EXPECT_TRACES_EQ(trace1, trace2) do { \ + EXPECT_TRUE((trace1) == (trace2)); \ + EXPECT_FALSE((trace1) != (trace2)); \ +} while(false) + +#define EXPECT_TRACES_NE(trace1, trace2) do { \ + EXPECT_FALSE((trace1) == (trace2)); \ + EXPECT_TRUE((trace1) != (trace2)); \ +} while(false) + +TEST(StackTraceTest, EqualsAndHash) { + std::hash> hasher; + + StackTrace<> empty1, empty2; + EXPECT_TRACES_EQ(empty1, empty2); + EXPECT_EQ(hasher(empty1), hasher(empty2)); + + auto trace1 = GetStackTrace2(/* skipFrames = */ 0, /* depth = */ 2); + EXPECT_TRACES_NE(trace1, empty2); + + auto trace2 = GetStackTrace2(/* skipFrames = */ 0, /* depth = */ 2); + EXPECT_TRACES_EQ(trace1, trace2); + EXPECT_EQ(hasher(trace1), hasher(trace2)); + + auto traceWithSkip = GetStackTrace2(/* skipFrames = */ 1, /* depth = */ 2); + EXPECT_TRACES_NE(trace1, traceWithSkip); + + auto anotherTrace = GetStackTrace3(); + EXPECT_TRACES_NE(trace1, anotherTrace); + + trace1 = StackTrace<>::TestSupport::constructFrom( + { reinterpret_cast(42), reinterpret_cast(43) }); + trace2 = 
StackTrace<>::TestSupport::constructFrom( + { reinterpret_cast(44), reinterpret_cast(45) }); + EXPECT_NE(hasher(trace1), hasher(trace2)); +} + +TEST(StackTraceTest, StackAllocatedEqualsAndHash) { + constexpr size_t capacity = 10; + std::hash> hasher; + + StackTrace empty1, empty2; + EXPECT_TRACES_EQ(empty1, empty2); + EXPECT_EQ(hasher(empty1), hasher(empty2)); + + auto trace1 = GetStackTrace2(/* skipFrames = */ 0, /* depth = */ 2);; + EXPECT_TRACES_NE(trace1, empty2); + + auto trace2 = GetStackTrace2(/* skipFrames = */ 0, /* depth = */ 2);; + EXPECT_TRACES_EQ(trace1, trace2); + EXPECT_EQ(hasher(trace1), hasher(trace2)); + + auto traceWithSkip = GetStackTrace2(/* skipFrames = */ 1, /* depth = */ 2); + EXPECT_TRACES_NE(trace1, traceWithSkip); + + auto anotherTrace = GetStackTrace3(); + EXPECT_TRACES_NE(trace1, anotherTrace); + + trace1 = StackTrace::TestSupport::constructFrom( + { reinterpret_cast(42), reinterpret_cast(43) }); + trace2 = StackTrace::TestSupport::constructFrom( + { reinterpret_cast(44), reinterpret_cast(45) }); + EXPECT_NE(hasher(trace1), hasher(trace2)); +} + +TEST(StackTraceTest, StoreInHashSet) { + constexpr size_t capacity = 10; + KStdUnorderedSet> set; + StackTrace empty; + StackTrace trace1 = GetStackTrace1(); + StackTrace trace2 = GetStackTrace2(); + EXPECT_THAT(set.find(empty), set.end()); + EXPECT_THAT(set.find(trace1), set.end()); + EXPECT_THAT(set.find(trace2), set.end()); + + set.insert(empty); + EXPECT_THAT(set.find(empty), Not(set.end())); + EXPECT_THAT(set.find(trace1), set.end()); + EXPECT_THAT(set.find(trace2), set.end()); + + set.insert(trace1); + EXPECT_THAT(set.find(empty), Not(set.end())); + EXPECT_THAT(set.find(trace1), Not(set.end())); + EXPECT_THAT(set.find(trace2), set.end()); + + set.insert(trace2); + EXPECT_THAT(set.find(empty), Not(set.end())); + EXPECT_THAT(set.find(trace1), Not(set.end())); + EXPECT_THAT(set.find(trace2), Not(set.end())); +} + +TEST(StackTraceTest, StackAllocatedStoreInHashSet) { + KStdUnorderedSet> set; 
+ StackTrace<> empty; + StackTrace<> trace1 = GetStackTrace1(); + StackTrace<> trace2 = GetStackTrace2(); + EXPECT_THAT(set.find(empty), set.end()); + EXPECT_THAT(set.find(trace1), set.end()); + EXPECT_THAT(set.find(trace2), set.end()); + + set.insert(empty); + EXPECT_THAT(set.find(empty), Not(set.end())); + EXPECT_THAT(set.find(trace1), set.end()); + EXPECT_THAT(set.find(trace2), set.end()); + + set.insert(trace1); + EXPECT_THAT(set.find(empty), Not(set.end())); + EXPECT_THAT(set.find(trace1), Not(set.end())); + EXPECT_THAT(set.find(trace2), set.end()); + + set.insert(trace2); + EXPECT_THAT(set.find(empty), Not(set.end())); + EXPECT_THAT(set.find(trace1), Not(set.end())); + EXPECT_THAT(set.find(trace2), Not(set.end())); +} + TEST(StackTraceDeathTest, PrintStackTrace) { EXPECT_DEATH( { AbortWithStackTrace(0); }, diff --git a/kotlin-native/runtime/src/main/cpp/Utils.cpp b/kotlin-native/runtime/src/main/cpp/Utils.cpp new file mode 100644 index 00000000000..9dacec6b375 --- /dev/null +++ b/kotlin-native/runtime/src/main/cpp/Utils.cpp @@ -0,0 +1,71 @@ +/* + * Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license + * that can be found in the LICENSE file. + */ + +#include "Utils.hpp" + +#include +#include + +namespace { + +// Hash combine functions derived from boost ones. +// Copyright 2005-2014 Daniel James. 
+
+template <typename X, typename R>
+constexpr auto rotl32(X x, R r) noexcept { return (x << r) | (x >> (32 - r)); }
+
+template <size_t SizeTBits>
+struct HashCombineImpl {
+    template <typename SizeT>
+    constexpr static SizeT fn(SizeT seed, SizeT value) {
+        seed ^= value + 0x9e3779b9 + (seed<<6) + (seed>>2);
+        return seed;
+    }
+};
+
+template<>
+struct HashCombineImpl<32> {
+    constexpr static uint32_t fn(uint32_t h1, uint32_t k1) {
+        const uint32_t c1 = 0xcc9e2d51;
+        const uint32_t c2 = 0x1b873593;
+
+        k1 *= c1;
+        k1 = rotl32(k1,15);
+        k1 *= c2;
+
+        h1 ^= k1;
+        h1 = rotl32(h1,13);
+        h1 = h1*5+0xe6546b64;
+
+        return h1;
+    }
+};
+
+template<>
+struct HashCombineImpl<64> {
+    constexpr static uint64_t fn(uint64_t h, uint64_t k) {
+        const uint64_t m = (uint64_t(0xc6a4a793) << 32) + 0x5bd1e995;
+        const int r = 47;
+
+        k *= m;
+        k ^= k >> r;
+        k *= m;
+
+        h ^= k;
+        h *= m;
+
+        // Completely arbitrary number, to prevent 0's
+        // from hashing to 0.
+        h += 0xe6546b64;
+
+        return h;
+    }
+};
+
+} // namespace
+
+size_t kotlin::CombineHash(size_t seed, size_t value) {
+    return HashCombineImpl<sizeof(size_t) * 8>::fn(seed, value);
+}
\ No newline at end of file
diff --git a/kotlin-native/runtime/src/main/cpp/Utils.hpp b/kotlin-native/runtime/src/main/cpp/Utils.hpp
index 9880130120d..6258f1194ef 100644
--- a/kotlin-native/runtime/src/main/cpp/Utils.hpp
+++ b/kotlin-native/runtime/src/main/cpp/Utils.hpp
@@ -6,6 +6,8 @@
 #ifndef RUNTIME_UTILS_H
 #define RUNTIME_UTILS_H
 
+#include <cstddef>
+
 #include <utility>
 
 namespace kotlin {
@@ -76,6 +78,8 @@ private:
     T2 oldValue_;
 };
 
+size_t CombineHash(size_t seed, size_t value);
+
 } // namespace kotlin
 
 #endif // RUNTIME_UTILS_H
diff --git a/license/README.md b/license/README.md
index 2f19d71d141..f5699787c3b 100644
--- a/license/README.md
+++ b/license/README.md
@@ -128,7 +128,11 @@ the Kotlin IntelliJ IDEA plugin:
 - Path: libraries/stdlib/wasm/src/kotlin/math/fdlibm/
   - License: SUN ([license/third_party/sun_license.txt][sun])
   - Origin: Copyright (C) 1993 by Sun Microsystems, Inc.
- + + - Path: kotlin-native/runtime/src/main/cpp/Utils.cpp + - License: Boost Software License 1.0 ([license/third_party/boost_LICENSE.txt][boost]) + - Origin: Derived from boost hash functions, Copyright 2005-2014 Daniel James + ## Kotlin Test Data The following source code is used for testing the Kotlin compiler and/or plugin and is not incorporated into