[K/N] Fix SP synchronization

This commit is contained in:
Alexander Shabalin
2023-08-01 15:06:39 +02:00
committed by Space Team
parent d70f891a82
commit 5cbf5ad529
4 changed files with 18 additions and 9 deletions
@@ -81,17 +81,17 @@ mm::SafePointActivator::~SafePointActivator() {
}
}
ALWAYS_INLINE void mm::safePoint() noexcept {
ALWAYS_INLINE void mm::safePoint(std::memory_order fastPathOrder) noexcept {
AssertThreadState(ThreadState::kRunnable);
auto action = safePointAction.load(std::memory_order_relaxed);
auto action = safePointAction.load(fastPathOrder);
if (__builtin_expect(action != nullptr, false)) {
slowPath();
}
}
ALWAYS_INLINE void mm::safePoint(mm::ThreadData& threadData) noexcept {
ALWAYS_INLINE void mm::safePoint(mm::ThreadData& threadData, std::memory_order fastPathOrder) noexcept {
AssertThreadState(&threadData, ThreadState::kRunnable);
auto action = safePointAction.load(std::memory_order_relaxed);
auto action = safePointAction.load(fastPathOrder);
if (__builtin_expect(action != nullptr, false)) {
slowPath(threadData);
}
@@ -5,6 +5,7 @@
#pragma once
#include <atomic>
#include <utility>
#include "Utils.hpp"
@@ -32,8 +33,8 @@ private:
bool active_;
};
void safePoint() noexcept;
void safePoint(ThreadData& threadData) noexcept;
void safePoint(std::memory_order fastPathOrder = std::memory_order_relaxed) noexcept;
void safePoint(ThreadData& threadData, std::memory_order fastPathOrder = std::memory_order_relaxed) noexcept;
namespace test_support {
@@ -30,8 +30,15 @@ std::atomic<bool> kotlin::mm::internal::gSuspensionRequested = false;
kotlin::ThreadState kotlin::mm::ThreadSuspensionData::setState(kotlin::ThreadState newState) noexcept {
ThreadState oldState = state_.exchange(newState);
if (oldState == ThreadState::kNative && newState == ThreadState::kRunnable) {
// must use already acquired ThreadData because TLS may be in invalid state e.g. during thread detach
safePoint(threadData_);
// Must use already acquired `ThreadData` because TLS may be in invalid state e.g. during thread detach.
// Also, this must load the SP action with sequentially consistent
// ordering, because the GC may have touched this thread's data, and we
// must synchronize with it before continuing.
// The GC will have either changed the stored SP handler (with seq_cst)
// or changed `internal::gSuspensionRequested` (with seq_cst), so either
// loading the SP action here or checking `internal::gSuspensionRequested`
// in `suspendIfRequested` is sufficient to observe it.
safePoint(threadData_, std::memory_order_seq_cst);
}
return oldState;
}
@@ -22,7 +22,8 @@ extern std::atomic<bool> gSuspensionRequested;
} // namespace internal
inline bool IsThreadSuspensionRequested() noexcept {
// TODO: Consider using a more relaxed memory order.
// Must use seq_cst ordering for synchronization with GC
// in native->runnable transition.
return internal::gSuspensionRequested.load();
}