[K/N] Remove std_support allocation code ^KT-59008

This commit is contained in:
Alexander Shabalin
2023-08-10 19:31:13 +02:00
committed by Space Team
parent 002cd011d4
commit 49814ecb8a
110 changed files with 557 additions and 1078 deletions
-2
View File
@@ -23,8 +23,6 @@
* For `extern "C"` declarations emulate namespaces with `Kotlin_[module_name]_` prefixes. * For `extern "C"` declarations emulate namespaces with `Kotlin_[module_name]_` prefixes.
* To mark type as move-only, privately inherit from `kotlin::MoveOnly` * To mark type as move-only, privately inherit from `kotlin::MoveOnly`
* To mark type unmovable and uncopyable, privately inherit from `kotlin::Pinned` * To mark type unmovable and uncopyable, privately inherit from `kotlin::Pinned`
* Use `std_support::*` containers and smart pointers instead of `std::*` ones. The former ones default to runtime-specific allocator.
* Use `new (std_support::kalloc) T(...)` (defined in `std_support/New.hpp`) instead of `new T(...)`, and `std_support::kdelete(ptr)` instead of `delete ptr`. The former ones use runtime-specific allocator.
## Naming ## Naming
@@ -7,10 +7,10 @@
#define CUSTOM_ALLOC_CPP_ATOMICSTACK_HPP_ #define CUSTOM_ALLOC_CPP_ATOMICSTACK_HPP_
#include <atomic> #include <atomic>
#include <vector>
#include "KAssert.h" #include "KAssert.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::alloc { namespace kotlin::alloc {
@@ -91,8 +91,8 @@ public:
} }
// Test method // Test method
std_support::vector<T*> GetElements() { std::vector<T*> GetElements() {
std_support::vector<T*> elements; std::vector<T*> elements;
T* elm = stack_.load(); T* elm = stack_.load();
while (elm) { while (elm) {
elements.push_back(elm); elements.push_back(elm);
@@ -26,15 +26,15 @@ struct Element {
TEST(AtomicStackTest, StressPushPop) { TEST(AtomicStackTest, StressPushPop) {
alloc::AtomicStack<Element> ready; alloc::AtomicStack<Element> ready;
alloc::AtomicStack<Element> used; alloc::AtomicStack<Element> used;
std_support::vector<Element> elements(1000); std::vector<Element> elements(1000);
std_support::vector<Element*> expected; std::vector<Element*> expected;
for (auto& element : elements) { for (auto& element : elements) {
ready.Push(&element); ready.Push(&element);
expected.push_back(&element); expected.push_back(&element);
} }
std::atomic<bool> canStart = false; std::atomic<bool> canStart = false;
std_support::vector<ScopedThread> mutators; std::vector<ScopedThread> mutators;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators.emplace_back([&]() NO_INLINE { mutators.emplace_back([&]() NO_INLINE {
while (!canStart.load(std::memory_order_relaxed)) { while (!canStart.load(std::memory_order_relaxed)) {
@@ -48,7 +48,7 @@ TEST(AtomicStackTest, StressPushPop) {
canStart.store(true, std::memory_order_relaxed); canStart.store(true, std::memory_order_relaxed);
mutators.clear(); mutators.clear();
std_support::vector<Element*> actual; std::vector<Element*> actual;
while (auto* element = ready.Pop()) { while (auto* element = ready.Pop()) {
actual.push_back(element); actual.push_back(element);
} }
@@ -86,8 +86,8 @@ bool FixedBlockPage::Sweep(GCSweepScope& sweepHandle, FinalizerQueue& finalizerQ
return nextFree_.first > 0 || nextFree_.last < end_; return nextFree_.first > 0 || nextFree_.last < end_;
} }
std_support::vector<uint8_t*> FixedBlockPage::GetAllocatedBlocks() noexcept { std::vector<uint8_t*> FixedBlockPage::GetAllocatedBlocks() noexcept {
std_support::vector<uint8_t*> allocated; std::vector<uint8_t*> allocated;
CustomAllocInfo("FixedBlockPage(%p)::Sweep()", this); CustomAllocInfo("FixedBlockPage(%p)::Sweep()", this);
FixedCellRange nextFree = nextFree_; // Accessing the previous free list structure. FixedCellRange nextFree = nextFree_; // Accessing the previous free list structure.
for (uint32_t cell = 0 ; cell < end_ ; cell += blockSize_) { for (uint32_t cell = 0 ; cell < end_ ; cell += blockSize_) {
@@ -8,11 +8,11 @@
#include <atomic> #include <atomic>
#include <cstdint> #include <cstdint>
#include <vector>
#include "AtomicStack.hpp" #include "AtomicStack.hpp"
#include "ExtraObjectPage.hpp" #include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::alloc { namespace kotlin::alloc {
@@ -45,7 +45,7 @@ public:
bool Sweep(GCSweepScope& sweepHandle, FinalizerQueue& finalizerQueue) noexcept; bool Sweep(GCSweepScope& sweepHandle, FinalizerQueue& finalizerQueue) noexcept;
// Testing method // Testing method
std_support::vector<uint8_t*> GetAllocatedBlocks() noexcept; std::vector<uint8_t*> GetAllocatedBlocks() noexcept;
private: private:
explicit FixedBlockPage(uint32_t blockSize) noexcept; explicit FixedBlockPage(uint32_t blockSize) noexcept;
@@ -10,6 +10,7 @@
#include <cstdlib> #include <cstdlib>
#include <cinttypes> #include <cinttypes>
#include <new> #include <new>
#include <vector>
#include "CustomAllocConstants.hpp" #include "CustomAllocConstants.hpp"
#include "AtomicStack.hpp" #include "AtomicStack.hpp"
@@ -19,7 +20,6 @@
#include "GCApi.hpp" #include "GCApi.hpp"
#include "Memory.h" #include "Memory.h"
#include "ThreadRegistry.hpp" #include "ThreadRegistry.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::alloc { namespace kotlin::alloc {
@@ -7,11 +7,11 @@
#include <atomic> #include <atomic>
#include <cstdint> #include <cstdint>
#include <vector>
#include "CustomLogging.hpp" #include "CustomLogging.hpp"
#include "CustomAllocConstants.hpp" #include "CustomAllocConstants.hpp"
#include "GCApi.hpp" #include "GCApi.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::alloc { namespace kotlin::alloc {
@@ -105,8 +105,8 @@ bool NextFitPage::CheckInvariants() noexcept {
} }
} }
std_support::vector<uint8_t*> NextFitPage::GetAllocatedBlocks() noexcept { std::vector<uint8_t*> NextFitPage::GetAllocatedBlocks() noexcept {
std_support::vector<uint8_t*> allocated; std::vector<uint8_t*> allocated;
Cell* end = cells_ + NEXT_FIT_PAGE_CELL_COUNT; Cell* end = cells_ + NEXT_FIT_PAGE_CELL_COUNT;
for (Cell* block = cells_ + 1; block != end; block = block->Next()) { for (Cell* block = cells_ + 1; block != end; block = block->Next()) {
if (block->isAllocated_) { if (block->isAllocated_) {
@@ -8,12 +8,12 @@
#include <atomic> #include <atomic>
#include <cstdint> #include <cstdint>
#include <vector>
#include "AtomicStack.hpp" #include "AtomicStack.hpp"
#include "Cell.hpp" #include "Cell.hpp"
#include "ExtraObjectPage.hpp" #include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::alloc { namespace kotlin::alloc {
@@ -36,7 +36,7 @@ public:
bool CheckInvariants() noexcept; bool CheckInvariants() noexcept;
// Testing method // Testing method
std_support::vector<uint8_t*> GetAllocatedBlocks() noexcept; std::vector<uint8_t*> GetAllocatedBlocks() noexcept;
private: private:
explicit NextFitPage(uint32_t cellCount) noexcept; explicit NextFitPage(uint32_t cellCount) noexcept;
@@ -8,11 +8,11 @@
#include <atomic> #include <atomic>
#include <cstdint> #include <cstdint>
#include <vector>
#include "AtomicStack.hpp" #include "AtomicStack.hpp"
#include "ExtraObjectPage.hpp" #include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::alloc { namespace kotlin::alloc {
@@ -104,8 +104,8 @@ private:
} }
// Testing method // Testing method
std_support::vector<T*> GetPages() noexcept { std::vector<T*> GetPages() noexcept {
std_support::vector<T*> pages; std::vector<T*> pages;
for (T* page : ready_.GetElements()) pages.push_back(page); for (T* page : ready_.GetElements()) pages.push_back(page);
for (T* page : used_.GetElements()) pages.push_back(page); for (T* page : used_.GetElements()) pages.push_back(page);
for (T* page : unswept_.GetElements()) pages.push_back(page); for (T* page : unswept_.GetElements()) pages.push_back(page);
@@ -11,7 +11,6 @@
#include "CustomLogging.hpp" #include "CustomLogging.hpp"
#include "CustomAllocConstants.hpp" #include "CustomAllocConstants.hpp"
#include "GCApi.hpp" #include "GCApi.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::alloc { namespace kotlin::alloc {
@@ -47,8 +46,8 @@ bool SingleObjectPage::Sweep(GCSweepScope& sweepHandle, FinalizerQueue& finalize
return false; return false;
} }
std_support::vector<uint8_t*> SingleObjectPage::GetAllocatedBlocks() noexcept { std::vector<uint8_t*> SingleObjectPage::GetAllocatedBlocks() noexcept {
std_support::vector<uint8_t*> allocated; std::vector<uint8_t*> allocated;
if (isAllocated_) { if (isAllocated_) {
allocated.push_back(data_); allocated.push_back(data_);
} }
@@ -8,11 +8,11 @@
#include <atomic> #include <atomic>
#include <cstdint> #include <cstdint>
#include <vector>
#include "AtomicStack.hpp" #include "AtomicStack.hpp"
#include "ExtraObjectPage.hpp" #include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::alloc { namespace kotlin::alloc {
@@ -39,7 +39,7 @@ private:
explicit SingleObjectPage(size_t size) noexcept; explicit SingleObjectPage(size_t size) noexcept;
// Testing method // Testing method
std_support::vector<uint8_t*> GetAllocatedBlocks() noexcept; std::vector<uint8_t*> GetAllocatedBlocks() noexcept;
std::atomic<SingleObjectPage*> next_; std::atomic<SingleObjectPage*> next_;
bool isAllocated_ = false; bool isAllocated_ = false;
@@ -5,11 +5,11 @@
#include "ObjectFactoryAllocator.hpp" #include "ObjectFactoryAllocator.hpp"
#include <memory>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "std_support/Memory.hpp"
using namespace kotlin; using namespace kotlin;
using testing::_; using testing::_;
@@ -28,7 +28,7 @@ public:
void* Alloc(size_t size) { return mock_->Alloc(size); } void* Alloc(size_t size) { return mock_->Alloc(size); }
private: private:
std_support::unique_ptr<testing::StrictMock<MockAllocator>> mock_ = std_support::make_unique<testing::StrictMock<MockAllocator>>(); std::unique_ptr<testing::StrictMock<MockAllocator>> mock_ = std::make_unique<testing::StrictMock<MockAllocator>>();
}; };
class MockGC { class MockGC {
@@ -194,11 +194,11 @@ public:
} }
} }
std_support::vector<ObjHeader*> Sweep() { std::vector<ObjHeader*> Sweep() {
gc::processWeaks<ProcessWeakTraits>(gc::GCHandle::getByEpoch(0), specialRefRegistry_); gc::processWeaks<ProcessWeakTraits>(gc::GCHandle::getByEpoch(0), specialRefRegistry_);
alloc::SweepExtraObjects<SweepTraits>(gc::GCHandle::getByEpoch(0), extraObjectFactory_); alloc::SweepExtraObjects<SweepTraits>(gc::GCHandle::getByEpoch(0), extraObjectFactory_);
auto finalizers = alloc::Sweep<SweepTraits>(gc::GCHandle::getByEpoch(0), objectFactory_); auto finalizers = alloc::Sweep<SweepTraits>(gc::GCHandle::getByEpoch(0), objectFactory_);
std_support::vector<ObjHeader*> objects; std::vector<ObjHeader*> objects;
for (auto node : finalizers.IterForTests()) { for (auto node : finalizers.IterForTests()) {
objects.push_back(node.GetObjHeader()); objects.push_back(node.GetObjHeader());
} }
@@ -206,16 +206,16 @@ public:
return objects; return objects;
} }
std_support::vector<ObjHeader*> Alive() { std::vector<ObjHeader*> Alive() {
std_support::vector<ObjHeader*> objects; std::vector<ObjHeader*> objects;
for (auto node : objectFactory_.LockForIter()) { for (auto node : objectFactory_.LockForIter()) {
objects.push_back(node.GetObjHeader()); objects.push_back(node.GetObjHeader());
} }
return objects; return objects;
} }
std_support::vector<mm::ExtraObjectData*> AliveExtraObjects() { std::vector<mm::ExtraObjectData*> AliveExtraObjects() {
std_support::vector<mm::ExtraObjectData*> objects; std::vector<mm::ExtraObjectData*> objects;
for (auto &node : extraObjectFactory_.LockForIter()) { for (auto &node : extraObjectFactory_.LockForIter()) {
objects.push_back(&node); objects.push_back(&node);
} }
@@ -273,7 +273,7 @@ private:
mm::SpecialRefRegistry specialRefRegistry_; mm::SpecialRefRegistry specialRefRegistry_;
mm::SpecialRefRegistry::ThreadQueue specialRefRegistryThreadQueue_{specialRefRegistry_}; mm::SpecialRefRegistry::ThreadQueue specialRefRegistryThreadQueue_{specialRefRegistry_};
std_support::vector<ObjectFactory::FinalizerQueue> finalizers_; std::vector<ObjectFactory::FinalizerQueue> finalizers_;
}; };
} // namespace } // namespace
@@ -6,7 +6,9 @@
#include "ObjectFactory.hpp" #include "ObjectFactory.hpp"
#include <atomic> #include <atomic>
#include <cstdlib>
#include <type_traits> #include <type_traits>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -18,8 +20,6 @@
#include "ScopedThread.hpp" #include "ScopedThread.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "Types.h" #include "Types.h"
#include "std_support/CStdlib.hpp"
#include "std_support/Vector.hpp"
// ObjectFactory is not used by custom allocator // ObjectFactory is not used by custom allocator
using namespace kotlin; using namespace kotlin;
@@ -30,8 +30,8 @@ namespace {
class SimpleAllocator { class SimpleAllocator {
public: public:
void* Alloc(size_t size) noexcept { return std_support::calloc(1, size); } void* Alloc(size_t size) noexcept { return std::calloc(1, size); }
static void Free(void* instance, size_t size) noexcept { std_support::free(instance); } static void Free(void* instance, size_t size) noexcept { std::free(instance); }
}; };
struct DataSizeProvider { struct DataSizeProvider {
@@ -50,8 +50,8 @@ template <typename Storage>
using Consumer = typename Storage::Consumer; using Consumer = typename Storage::Consumer;
template <size_t DataAlignment> template <size_t DataAlignment>
std_support::vector<void*> Collect(ObjectFactoryStorage<DataAlignment>& storage) { std::vector<void*> Collect(ObjectFactoryStorage<DataAlignment>& storage) {
std_support::vector<void*> result; std::vector<void*> result;
for (auto& node : storage.LockForIter()) { for (auto& node : storage.LockForIter()) {
result.push_back(node.Data()); result.push_back(node.Data());
} }
@@ -59,8 +59,8 @@ std_support::vector<void*> Collect(ObjectFactoryStorage<DataAlignment>& storage)
} }
template <typename T, size_t DataAlignment> template <typename T, size_t DataAlignment>
std_support::vector<T> Collect(ObjectFactoryStorage<DataAlignment>& storage) { std::vector<T> Collect(ObjectFactoryStorage<DataAlignment>& storage) {
std_support::vector<T> result; std::vector<T> result;
for (auto& node : storage.LockForIter()) { for (auto& node : storage.LockForIter()) {
result.push_back(*reinterpret_cast<T*>(node.Data())); result.push_back(*reinterpret_cast<T*>(node.Data()));
} }
@@ -68,8 +68,8 @@ std_support::vector<T> Collect(ObjectFactoryStorage<DataAlignment>& storage) {
} }
template <typename T, size_t DataAlignment> template <typename T, size_t DataAlignment>
std_support::vector<T> Collect(Consumer<ObjectFactoryStorage<DataAlignment>>& consumer) { std::vector<T> Collect(Consumer<ObjectFactoryStorage<DataAlignment>>& consumer) {
std_support::vector<T> result; std::vector<T> result;
for (auto& node : consumer) { for (auto& node : consumer) {
result.push_back(*reinterpret_cast<T*>(node.Data())); result.push_back(*reinterpret_cast<T*>(node.Data()));
} }
@@ -641,8 +641,8 @@ TEST(ObjectFactoryStorageTest, ConcurrentPublish) {
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std_support::vector<int> expected; std::vector<int> expected;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
expected.push_back(i); expected.push_back(i);
@@ -672,8 +672,8 @@ TEST(ObjectFactoryStorageTest, IterWhileConcurrentPublish) {
constexpr int kStartCount = 50; constexpr int kStartCount = 50;
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std_support::vector<int> expectedBefore; std::vector<int> expectedBefore;
std_support::vector<int> expectedAfter; std::vector<int> expectedAfter;
Producer<ObjectFactoryStorageRegular> producer(storage, SimpleAllocator()); Producer<ObjectFactoryStorageRegular> producer(storage, SimpleAllocator());
for (int i = 0; i < kStartCount; ++i) { for (int i = 0; i < kStartCount; ++i) {
expectedBefore.push_back(i); expectedBefore.push_back(i);
@@ -685,7 +685,7 @@ TEST(ObjectFactoryStorageTest, IterWhileConcurrentPublish) {
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std::atomic<int> startedCount(0); std::atomic<int> startedCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
int j = i + kStartCount; int j = i + kStartCount;
expectedAfter.push_back(j); expectedAfter.push_back(j);
@@ -700,7 +700,7 @@ TEST(ObjectFactoryStorageTest, IterWhileConcurrentPublish) {
}); });
} }
std_support::vector<int> actualBefore; std::vector<int> actualBefore;
{ {
auto iter = storage.LockForIter(); auto iter = storage.LockForIter();
while (readyCount < kThreadCount) { while (readyCount < kThreadCount) {
@@ -730,7 +730,7 @@ TEST(ObjectFactoryStorageTest, EraseWhileConcurrentPublish) {
constexpr int kStartCount = 50; constexpr int kStartCount = 50;
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std_support::vector<int> expectedAfter; std::vector<int> expectedAfter;
Producer<ObjectFactoryStorageRegular> producer(storage, SimpleAllocator()); Producer<ObjectFactoryStorageRegular> producer(storage, SimpleAllocator());
for (int i = 0; i < kStartCount; ++i) { for (int i = 0; i < kStartCount; ++i) {
if (i % 2 == 0) { if (i % 2 == 0) {
@@ -743,7 +743,7 @@ TEST(ObjectFactoryStorageTest, EraseWhileConcurrentPublish) {
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std::atomic<int> startedCount(0); std::atomic<int> startedCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
int j = i + kStartCount; int j = i + kStartCount;
expectedAfter.push_back(j); expectedAfter.push_back(j);
@@ -793,9 +793,9 @@ public:
MOCK_METHOD(void*, Alloc, (size_t)); MOCK_METHOD(void*, Alloc, (size_t));
MOCK_METHOD(void, Free, (void*, size_t)); MOCK_METHOD(void, Free, (void*, size_t));
void* DefaultAlloc(size_t size) { return std_support::calloc(1, size); } void* DefaultAlloc(size_t size) { return std::calloc(1, size); }
void DefaultFree(void* instance, size_t size) { std_support::free(instance); } void DefaultFree(void* instance, size_t size) { std::free(instance); }
}; };
class GlobalMockAllocator { class GlobalMockAllocator {
@@ -1067,7 +1067,7 @@ TEST(ObjectFactoryTest, RunFinalizers) {
ObjectFactory::ThreadQueue threadQueue(objectFactory, GlobalMockAllocator()); ObjectFactory::ThreadQueue threadQueue(objectFactory, GlobalMockAllocator());
ObjectFactory::FinalizerQueue finalizerQueue; ObjectFactory::FinalizerQueue finalizerQueue;
std_support::vector<ObjHeader*> objects; std::vector<ObjHeader*> objects;
EXPECT_CALL(allocator, Alloc(_)).Times(10); EXPECT_CALL(allocator, Alloc(_)).Times(10);
for (int i = 0; i < 10; ++i) { for (int i = 0; i < 10; ++i) {
objects.push_back(threadQueue.CreateObject(objectType.typeInfo())); objects.push_back(threadQueue.CreateObject(objectType.typeInfo()));
@@ -1100,9 +1100,9 @@ TEST(ObjectFactoryTest, ConcurrentPublish) {
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std::mutex expectedMutex; std::mutex expectedMutex;
std_support::vector<ObjHeader*> expected; std::vector<ObjHeader*> expected;
EXPECT_CALL(allocator, Alloc(_)).Times(kThreadCount); EXPECT_CALL(allocator, Alloc(_)).Times(kThreadCount);
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
@@ -1127,7 +1127,7 @@ TEST(ObjectFactoryTest, ConcurrentPublish) {
threads.clear(); threads.clear();
auto iter = objectFactory.LockForIter(); auto iter = objectFactory.LockForIter();
std_support::vector<ObjHeader*> actual; std::vector<ObjHeader*> actual;
for (auto it = iter.begin(); it != iter.end(); ++it) { for (auto it = iter.begin(); it != iter.end(); ++it) {
actual.push_back(it->GetObjHeader()); actual.push_back(it->GetObjHeader());
} }
@@ -7,6 +7,8 @@
#include <atomic> #include <atomic>
#include <cstddef> #include <cstddef>
#include <memory>
#include <vector>
#include "AllocatorImpl.hpp" #include "AllocatorImpl.hpp"
#include "Barriers.hpp" #include "Barriers.hpp"
@@ -22,7 +24,6 @@
#include "ThreadData.hpp" #include "ThreadData.hpp"
#include "Types.h" #include "Types.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Memory.hpp"
namespace kotlin { namespace kotlin {
namespace gc { namespace gc {
@@ -86,7 +87,7 @@ private:
mark::ParallelMark markDispatcher_; mark::ParallelMark markDispatcher_;
ScopedThread mainThread_; ScopedThread mainThread_;
std_support::vector<ScopedThread> auxThreads_; std::vector<ScopedThread> auxThreads_;
}; };
} // namespace gc } // namespace gc
@@ -9,6 +9,7 @@
#include <future> #include <future>
#include <mutex> #include <mutex>
#include <thread> #include <thread>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -26,7 +27,6 @@
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "ThreadData.hpp" #include "ThreadData.hpp"
#include "WeakRef.hpp" #include "WeakRef.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -663,7 +663,7 @@ public:
StackObjectHolder& AddStackRoot() { StackObjectHolder& AddStackRoot() {
RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddStackRoot can only be called in the mutator thread"); RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddStackRoot can only be called in the mutator thread");
auto& context = executor_.context(); auto& context = executor_.context();
auto holder = std_support::make_unique<StackObjectHolder>(*context.memory_->memoryState()->GetThreadData()); auto holder = std::make_unique<StackObjectHolder>(*context.memory_->memoryState()->GetThreadData());
auto& holderRef = *holder; auto& holderRef = *holder;
context.stackRoots_.push_back(std::move(holder)); context.stackRoots_.push_back(std::move(holder));
return holderRef; return holderRef;
@@ -672,7 +672,7 @@ public:
StackObjectHolder& AddStackRoot(ObjHeader* object) { StackObjectHolder& AddStackRoot(ObjHeader* object) {
RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddStackRoot can only be called in the mutator thread"); RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddStackRoot can only be called in the mutator thread");
auto& context = executor_.context(); auto& context = executor_.context();
auto holder = std_support::make_unique<StackObjectHolder>(object); auto holder = std::make_unique<StackObjectHolder>(object);
auto& holderRef = *holder; auto& holderRef = *holder;
context.stackRoots_.push_back(std::move(holder)); context.stackRoots_.push_back(std::move(holder));
return holderRef; return holderRef;
@@ -681,7 +681,7 @@ public:
GlobalObjectHolder& AddGlobalRoot() { GlobalObjectHolder& AddGlobalRoot() {
RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddGlobalRoot can only be called in the mutator thread"); RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddGlobalRoot can only be called in the mutator thread");
auto& context = executor_.context(); auto& context = executor_.context();
auto holder = std_support::make_unique<GlobalObjectHolder>(*context.memory_->memoryState()->GetThreadData()); auto holder = std::make_unique<GlobalObjectHolder>(*context.memory_->memoryState()->GetThreadData());
auto& holderRef = *holder; auto& holderRef = *holder;
context.globalRoots_.push_back(std::move(holder)); context.globalRoots_.push_back(std::move(holder));
return holderRef; return holderRef;
@@ -690,7 +690,7 @@ public:
GlobalObjectHolder& AddGlobalRoot(ObjHeader* object) { GlobalObjectHolder& AddGlobalRoot(ObjHeader* object) {
RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddGlobalRoot can only be called in the mutator thread"); RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddGlobalRoot can only be called in the mutator thread");
auto& context = executor_.context(); auto& context = executor_.context();
auto holder = std_support::make_unique<GlobalObjectHolder>(*context.memory_->memoryState()->GetThreadData(), object); auto holder = std::make_unique<GlobalObjectHolder>(*context.memory_->memoryState()->GetThreadData(), object);
auto& holderRef = *holder; auto& holderRef = *holder;
context.globalRoots_.push_back(std::move(holder)); context.globalRoots_.push_back(std::move(holder));
return holderRef; return holderRef;
@@ -700,11 +700,11 @@ public:
private: private:
struct Context { struct Context {
std_support::unique_ptr<ScopedMemoryInit> memory_; std::unique_ptr<ScopedMemoryInit> memory_;
std_support::vector<std_support::unique_ptr<StackObjectHolder>> stackRoots_; std::vector<std::unique_ptr<StackObjectHolder>> stackRoots_;
std_support::vector<std_support::unique_ptr<GlobalObjectHolder>> globalRoots_; std::vector<std::unique_ptr<GlobalObjectHolder>> globalRoots_;
Context() : memory_(std_support::make_unique<ScopedMemoryInit>()) { Context() : memory_(std::make_unique<ScopedMemoryInit>()) {
// SingleThreadExecutor must work in the runnable state, so that GC does not collect between tasks. // SingleThreadExecutor must work in the runnable state, so that GC does not collect between tasks.
AssertThreadState(memory_->memoryState(), ThreadState::kRunnable); AssertThreadState(memory_->memoryState(), ThreadState::kRunnable);
} }
@@ -716,10 +716,10 @@ private:
} // namespace } // namespace
TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsCollect) { TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsCollect) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(kDefaultThreadCount); std::vector<ObjHeader*> globals(kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(kDefaultThreadCount); std::vector<ObjHeader*> locals(kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(kDefaultThreadCount); std::vector<ObjHeader*> reachables(kDefaultThreadCount);
auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = mutator.AddGlobalRoot(); auto& global = mutator.AddGlobalRoot();
@@ -738,7 +738,7 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsCollect) {
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
for (auto& mutator : mutators) { for (auto& mutator : mutators) {
@@ -756,7 +756,7 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsCollect) {
future.wait(); future.wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (auto& global : globals) { for (auto& global : globals) {
expectedAlive.push_back(global); expectedAlive.push_back(global);
} }
@@ -773,10 +773,10 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsCollect) {
} }
TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAllCollect) { TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAllCollect) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(kDefaultThreadCount); std::vector<ObjHeader*> globals(kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(kDefaultThreadCount); std::vector<ObjHeader*> locals(kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(kDefaultThreadCount); std::vector<ObjHeader*> reachables(kDefaultThreadCount);
auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = mutator.AddGlobalRoot(); auto& global = mutator.AddGlobalRoot();
@@ -795,7 +795,7 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAllCollect) {
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
for (auto& mutator : mutators) { for (auto& mutator : mutators) {
gcFutures.emplace_back(mutator.Execute([](mm::ThreadData& threadData, Mutator& mutator) { gcFutures.emplace_back(mutator.Execute([](mm::ThreadData& threadData, Mutator& mutator) {
@@ -818,7 +818,7 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAllCollect) {
.wait(); .wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (auto& global : globals) { for (auto& global : globals) {
expectedAlive.push_back(global); expectedAlive.push_back(global);
} }
@@ -835,10 +835,10 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAllCollect) {
} }
TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRequested) { TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRequested) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(kDefaultThreadCount); std::vector<ObjHeader*> globals(kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(kDefaultThreadCount); std::vector<ObjHeader*> locals(kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(kDefaultThreadCount); std::vector<ObjHeader*> reachables(kDefaultThreadCount);
auto allocateInHeap = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto allocateInHeap = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = AllocateObject(threadData); auto& global = AllocateObject(threadData);
@@ -865,7 +865,7 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRe
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
auto epoch = mm::GlobalData::Instance().gc().Schedule(); auto epoch = mm::GlobalData::Instance().gc().Schedule();
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
@@ -889,7 +889,7 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRe
future.wait(); future.wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (auto& global : globals) { for (auto& global : globals) {
expectedAlive.push_back(global); expectedAlive.push_back(global);
} }
@@ -906,10 +906,10 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRe
} }
TEST_P(ConcurrentMarkAndSweepTest, CrossThreadReference) { TEST_P(ConcurrentMarkAndSweepTest, CrossThreadReference) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(kDefaultThreadCount); std::vector<ObjHeader*> globals(kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(kDefaultThreadCount); std::vector<ObjHeader*> locals(kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(2 * kDefaultThreadCount); std::vector<ObjHeader*> reachables(2 * kDefaultThreadCount);
auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = mutator.AddGlobalRoot(); auto& global = mutator.AddGlobalRoot();
@@ -935,7 +935,7 @@ TEST_P(ConcurrentMarkAndSweepTest, CrossThreadReference) {
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
for (auto& mutator : mutators) { for (auto& mutator : mutators) {
@@ -953,7 +953,7 @@ TEST_P(ConcurrentMarkAndSweepTest, CrossThreadReference) {
future.wait(); future.wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (auto& global : globals) { for (auto& global : globals) {
expectedAlive.push_back(global); expectedAlive.push_back(global);
} }
@@ -973,7 +973,7 @@ TEST_P(ConcurrentMarkAndSweepTest, CrossThreadReference) {
} }
TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsWeaks) { TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsWeaks) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
ObjHeader* globalRoot = nullptr; ObjHeader* globalRoot = nullptr;
test_support::RegularWeakReferenceImpl* weak = nullptr; test_support::RegularWeakReferenceImpl* weak = nullptr;
@@ -997,7 +997,7 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsWeaks) {
mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {}).wait(); mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {}).wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
auto epoch = mm::GlobalData::Instance().gc().Schedule(); auto epoch = mm::GlobalData::Instance().gc().Schedule();
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
@@ -1027,14 +1027,14 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsWeaks) {
} }
TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsWeakNewObj) { TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsWeakNewObj) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
// Make sure all mutators are initialized. // Make sure all mutators are initialized.
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {}).wait(); mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {}).wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
auto epoch = mm::GlobalData::Instance().gc().Schedule(); auto epoch = mm::GlobalData::Instance().gc().Schedule();
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
@@ -1073,11 +1073,11 @@ TEST_P(ConcurrentMarkAndSweepTest, MultipleMutatorsWeakNewObj) {
} }
TEST_P(ConcurrentMarkAndSweepTest, NewThreadsWhileRequestingCollection) { TEST_P(ConcurrentMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(2 * kDefaultThreadCount); std::vector<ObjHeader*> globals(2 * kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(2 * kDefaultThreadCount); std::vector<ObjHeader*> locals(2 * kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(2 * kDefaultThreadCount); std::vector<ObjHeader*> reachables(2 * kDefaultThreadCount);
std_support::vector<ObjHeader*> unreachables(2 * kDefaultThreadCount); std::vector<ObjHeader*> unreachables(2 * kDefaultThreadCount);
auto expandRootSet = [&globals, &locals, &reachables, &unreachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto expandRootSet = [&globals, &locals, &reachables, &unreachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = mutator.AddGlobalRoot(); auto& global = mutator.AddGlobalRoot();
@@ -1097,7 +1097,7 @@ TEST_P(ConcurrentMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
auto epoch = mm::GlobalData::Instance().gc().Schedule(); auto epoch = mm::GlobalData::Instance().gc().Schedule();
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
@@ -1106,8 +1106,8 @@ TEST_P(ConcurrentMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
} }
// Now start attaching new threads. // Now start attaching new threads.
std_support::vector<Mutator> newMutators(kDefaultThreadCount); std::vector<Mutator> newMutators(kDefaultThreadCount);
std_support::vector<std::future<void>> attachFutures(kDefaultThreadCount); std::vector<std::future<void>> attachFutures(kDefaultThreadCount);
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
attachFutures[i] = newMutators[i].Execute([&gcDone, i, expandRootSet](mm::ThreadData& threadData, Mutator& mutator) { attachFutures[i] = newMutators[i].Execute([&gcDone, i, expandRootSet](mm::ThreadData& threadData, Mutator& mutator) {
@@ -1141,7 +1141,7 @@ TEST_P(ConcurrentMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
future.wait(); future.wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
expectedAlive.push_back(globals[i]); expectedAlive.push_back(globals[i]);
expectedAlive.push_back(locals[i]); expectedAlive.push_back(locals[i]);
@@ -1155,7 +1155,7 @@ TEST_P(ConcurrentMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
// Force mutators to publish their internal heaps // Force mutators to publish their internal heaps
// Really only needed for legacy allocators. // Really only needed for legacy allocators.
std_support::vector<std::future<void>> publishFutures; std::vector<std::future<void>> publishFutures;
for (auto& mutator: mutators) { for (auto& mutator: mutators) {
publishFutures.emplace_back( publishFutures.emplace_back(
mutator.Execute([](mm::ThreadData& threadData, Mutator& mutator) { threadData.allocator().prepareForGC(); })); mutator.Execute([](mm::ThreadData& threadData, Mutator& mutator) { threadData.allocator().prepareForGC(); }));
@@ -1173,7 +1173,7 @@ TEST_P(ConcurrentMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
} }
TEST_P(ConcurrentMarkAndSweepTest, FreeObjectWithFreeWeakReversedOrder) { TEST_P(ConcurrentMarkAndSweepTest, FreeObjectWithFreeWeakReversedOrder) {
std_support::vector<Mutator> mutators(2); std::vector<Mutator> mutators(2);
std::atomic<test_support::Object<Payload>*> object1 = nullptr; std::atomic<test_support::Object<Payload>*> object1 = nullptr;
std::atomic<test_support::RegularWeakReferenceImpl*> weak = nullptr; std::atomic<test_support::RegularWeakReferenceImpl*> weak = nullptr;
std::atomic<bool> done = false; std::atomic<bool> done = false;
@@ -5,16 +5,17 @@
#include "GCImpl.hpp" #include "GCImpl.hpp"
#include <memory>
#include "ConcurrentMarkAndSweep.hpp" #include "ConcurrentMarkAndSweep.hpp"
#include "GC.hpp" #include "GC.hpp"
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "MarkAndSweepUtils.hpp" #include "MarkAndSweepUtils.hpp"
#include "ObjectOps.hpp" #include "ObjectOps.hpp"
#include "std_support/Memory.hpp"
using namespace kotlin; using namespace kotlin;
gc::GC::ThreadData::ThreadData(GC& gc, mm::ThreadData& threadData) noexcept : impl_(std_support::make_unique<Impl>(gc, threadData)) {} gc::GC::ThreadData::ThreadData(GC& gc, mm::ThreadData& threadData) noexcept : impl_(std::make_unique<Impl>(gc, threadData)) {}
gc::GC::ThreadData::~ThreadData() = default; gc::GC::ThreadData::~ThreadData() = default;
@@ -35,7 +36,7 @@ void gc::GC::ThreadData::onThreadRegistration() noexcept {
} }
gc::GC::GC(alloc::Allocator& allocator, gcScheduler::GCScheduler& gcScheduler) noexcept : gc::GC::GC(alloc::Allocator& allocator, gcScheduler::GCScheduler& gcScheduler) noexcept :
impl_(std_support::make_unique<Impl>(allocator, gcScheduler)) {} impl_(std::make_unique<Impl>(allocator, gcScheduler)) {}
gc::GC::~GC() = default; gc::GC::~GC() = default;
@@ -1,9 +1,13 @@
/*
* Copyright 2010-2023 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#include "ParallelMark.hpp" #include "ParallelMark.hpp"
#include "MarkAndSweepUtils.hpp" #include "MarkAndSweepUtils.hpp"
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Memory.hpp"
// required to access gc thread data // required to access gc thread data
#include "GCImpl.hpp" #include "GCImpl.hpp"
@@ -1,7 +1,12 @@
/*
* Copyright 2010-2023 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once #pragma once
#include <mutex>
#include <condition_variable> #include <condition_variable>
#include <mutex>
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "ManuallyScoped.hpp" #include "ManuallyScoped.hpp"
@@ -9,7 +14,6 @@
#include "ParallelProcessor.hpp" #include "ParallelProcessor.hpp"
#include "ThreadRegistry.hpp" #include "ThreadRegistry.hpp"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Vector.hpp"
namespace kotlin::gc::mark { namespace kotlin::gc::mark {
@@ -16,7 +16,7 @@ namespace {
class FinalizerProcessorTest : public testing::Test { class FinalizerProcessorTest : public testing::Test {
public: public:
using FinalizerQueue = std_support::vector<int>; using FinalizerQueue = std::vector<int>;
struct FinalizerQueueTraits { struct FinalizerQueueTraits {
static bool isEmpty(const FinalizerQueue& queue) noexcept { return queue.empty(); } static bool isEmpty(const FinalizerQueue& queue) noexcept { return queue.empty(); }
@@ -7,12 +7,12 @@
#include <atomic> #include <atomic>
#include <cstdint> #include <cstdint>
#include <memory>
#include "ExtraObjectData.hpp" #include "ExtraObjectData.hpp"
#include "GCScheduler.hpp" #include "GCScheduler.hpp"
#include "Memory.h" #include "Memory.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Memory.hpp"
namespace kotlin { namespace kotlin {
@@ -48,7 +48,7 @@ public:
void onAllocation(ObjHeader* object) noexcept; void onAllocation(ObjHeader* object) noexcept;
private: private:
std_support::unique_ptr<Impl> impl_; std::unique_ptr<Impl> impl_;
}; };
// Header to be placed before each heap object. GC will use this to keep its data if needed. // Header to be placed before each heap object. GC will use this to keep its data if needed.
@@ -80,7 +80,7 @@ public:
void WaitFinalizers(int64_t epoch) noexcept; void WaitFinalizers(int64_t epoch) noexcept;
private: private:
std_support::unique_ptr<Impl> impl_; std::unique_ptr<Impl> impl_;
}; };
bool isMarked(ObjHeader* object) noexcept; bool isMarked(ObjHeader* object) noexcept;
@@ -6,16 +6,16 @@
#include "MarkAndSweepUtils.hpp" #include "MarkAndSweepUtils.hpp"
#include <functional> #include <functional>
#include <TestSupport.hpp> #include <unordered_set>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "FinalizerHooks.hpp" #include "FinalizerHooks.hpp"
#include "ObjectTestSupport.hpp" #include "ObjectTestSupport.hpp"
#include "TestSupport.hpp"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/UnorderedSet.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -73,7 +73,7 @@ public:
class ScopedMarkTraits : private Pinned { class ScopedMarkTraits : private Pinned {
public: public:
using MarkQueue = std_support::vector<ObjHeader*>; using MarkQueue = std::vector<ObjHeader*>;
ScopedMarkTraits() { ScopedMarkTraits() {
RuntimeAssert(instance_ == nullptr, "Only one ScopedMarkTraits is allowed"); RuntimeAssert(instance_ == nullptr, "Only one ScopedMarkTraits is allowed");
@@ -85,7 +85,7 @@ public:
instance_ = nullptr; instance_ = nullptr;
} }
const std_support::unordered_set<ObjHeader*>& marked() const { return marked_; } const std::unordered_set<ObjHeader*>& marked() const { return marked_; }
static void clear(MarkQueue& queue) noexcept { static void clear(MarkQueue& queue) noexcept {
queue.clear(); queue.clear();
@@ -122,7 +122,7 @@ public:
private: private:
static ScopedMarkTraits* instance_; static ScopedMarkTraits* instance_;
std_support::unordered_set<ObjHeader*> marked_; std::unordered_set<ObjHeader*> marked_;
}; };
// static // static
@@ -130,10 +130,10 @@ ScopedMarkTraits* ScopedMarkTraits::instance_ = nullptr;
class MarkAndSweepUtilsMarkTest : public ::testing::Test { class MarkAndSweepUtilsMarkTest : public ::testing::Test {
public: public:
const std_support::unordered_set<ObjHeader*>& marked() const { return markTraits_.marked(); } const std::unordered_set<ObjHeader*>& marked() const { return markTraits_.marked(); }
auto MarkedMatcher(std::initializer_list<std::reference_wrapper<test_support::Any>> expected) { auto MarkedMatcher(std::initializer_list<std::reference_wrapper<test_support::Any>> expected) {
std_support::vector<ObjHeader*> objects; std::vector<ObjHeader*> objects;
for (auto& object : expected) { for (auto& object : expected) {
objects.push_back(object.get().header()); objects.push_back(object.get().header());
} }
@@ -141,7 +141,7 @@ public:
} }
gc::MarkStats Mark(std::initializer_list<std::reference_wrapper<test_support::Any>> graySet) { gc::MarkStats Mark(std::initializer_list<std::reference_wrapper<test_support::Any>> graySet) {
std_support::vector<ObjHeader*> objects; std::vector<ObjHeader*> objects;
for (auto& object : graySet) ScopedMarkTraits::tryEnqueue(objects, object.get().header()); for (auto& object : graySet) ScopedMarkTraits::tryEnqueue(objects, object.get().header());
auto handle = gc::GCHandle::create(epoch_++); auto handle = gc::GCHandle::create(epoch_++);
gc::Mark<ScopedMarkTraits>(handle, objects); gc::Mark<ScopedMarkTraits>(handle, objects);
@@ -5,6 +5,7 @@
#include "GCImpl.hpp" #include "GCImpl.hpp"
#include "Common.h"
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "KAssert.h" #include "KAssert.h"
#include "Logging.hpp" #include "Logging.hpp"
@@ -5,17 +5,18 @@
#include "GCImpl.hpp" #include "GCImpl.hpp"
#include <memory>
#include "GC.hpp" #include "GC.hpp"
#include "GCStatistics.hpp" #include "GCStatistics.hpp"
#include "GlobalData.hpp" #include "GlobalData.hpp"
#include "MarkAndSweepUtils.hpp" #include "MarkAndSweepUtils.hpp"
#include "ObjectOps.hpp" #include "ObjectOps.hpp"
#include "SameThreadMarkAndSweep.hpp" #include "SameThreadMarkAndSweep.hpp"
#include "std_support/Memory.hpp"
using namespace kotlin; using namespace kotlin;
gc::GC::ThreadData::ThreadData(GC& gc, mm::ThreadData& threadData) noexcept : impl_(std_support::make_unique<Impl>(gc, threadData)) {} gc::GC::ThreadData::ThreadData(GC& gc, mm::ThreadData& threadData) noexcept : impl_(std::make_unique<Impl>(gc, threadData)) {}
gc::GC::ThreadData::~ThreadData() = default; gc::GC::ThreadData::~ThreadData() = default;
@@ -28,7 +29,7 @@ void gc::GC::ThreadData::safePoint() noexcept {}
void gc::GC::ThreadData::onThreadRegistration() noexcept {} void gc::GC::ThreadData::onThreadRegistration() noexcept {}
gc::GC::GC(alloc::Allocator& allocator, gcScheduler::GCScheduler& gcScheduler) noexcept : gc::GC::GC(alloc::Allocator& allocator, gcScheduler::GCScheduler& gcScheduler) noexcept :
impl_(std_support::make_unique<Impl>(allocator, gcScheduler)) {} impl_(std::make_unique<Impl>(allocator, gcScheduler)) {}
gc::GC::~GC() = default; gc::GC::~GC() = default;
@@ -7,8 +7,10 @@
#include <condition_variable> #include <condition_variable>
#include <future> #include <future>
#include <memory>
#include <mutex> #include <mutex>
#include <thread> #include <thread>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -25,8 +27,6 @@
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "ThreadData.hpp" #include "ThreadData.hpp"
#include "WeakRef.hpp" #include "WeakRef.hpp"
#include "std_support/Memory.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -638,7 +638,7 @@ public:
StackObjectHolder& AddStackRoot() { StackObjectHolder& AddStackRoot() {
RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddStackRoot can only be called in the mutator thread"); RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddStackRoot can only be called in the mutator thread");
auto& context = executor_.context(); auto& context = executor_.context();
auto holder = std_support::make_unique<StackObjectHolder>(*context.memory_->memoryState()->GetThreadData()); auto holder = std::make_unique<StackObjectHolder>(*context.memory_->memoryState()->GetThreadData());
auto& holderRef = *holder; auto& holderRef = *holder;
context.stackRoots_.push_back(std::move(holder)); context.stackRoots_.push_back(std::move(holder));
return holderRef; return holderRef;
@@ -647,7 +647,7 @@ public:
StackObjectHolder& AddStackRoot(ObjHeader* object) { StackObjectHolder& AddStackRoot(ObjHeader* object) {
RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddStackRoot can only be called in the mutator thread"); RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddStackRoot can only be called in the mutator thread");
auto& context = executor_.context(); auto& context = executor_.context();
auto holder = std_support::make_unique<StackObjectHolder>(object); auto holder = std::make_unique<StackObjectHolder>(object);
auto& holderRef = *holder; auto& holderRef = *holder;
context.stackRoots_.push_back(std::move(holder)); context.stackRoots_.push_back(std::move(holder));
return holderRef; return holderRef;
@@ -656,7 +656,7 @@ public:
GlobalObjectHolder& AddGlobalRoot() { GlobalObjectHolder& AddGlobalRoot() {
RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddGlobalRoot can only be called in the mutator thread"); RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddGlobalRoot can only be called in the mutator thread");
auto& context = executor_.context(); auto& context = executor_.context();
auto holder = std_support::make_unique<GlobalObjectHolder>(*context.memory_->memoryState()->GetThreadData()); auto holder = std::make_unique<GlobalObjectHolder>(*context.memory_->memoryState()->GetThreadData());
auto& holderRef = *holder; auto& holderRef = *holder;
context.globalRoots_.push_back(std::move(holder)); context.globalRoots_.push_back(std::move(holder));
return holderRef; return holderRef;
@@ -665,7 +665,7 @@ public:
GlobalObjectHolder& AddGlobalRoot(ObjHeader* object) { GlobalObjectHolder& AddGlobalRoot(ObjHeader* object) {
RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddGlobalRoot can only be called in the mutator thread"); RuntimeAssert(std::this_thread::get_id() == executor_.threadId(), "AddGlobalRoot can only be called in the mutator thread");
auto& context = executor_.context(); auto& context = executor_.context();
auto holder = std_support::make_unique<GlobalObjectHolder>(*context.memory_->memoryState()->GetThreadData(), object); auto holder = std::make_unique<GlobalObjectHolder>(*context.memory_->memoryState()->GetThreadData(), object);
auto& holderRef = *holder; auto& holderRef = *holder;
context.globalRoots_.push_back(std::move(holder)); context.globalRoots_.push_back(std::move(holder));
return holderRef; return holderRef;
@@ -675,11 +675,11 @@ public:
private: private:
struct Context { struct Context {
std_support::unique_ptr<ScopedMemoryInit> memory_; std::unique_ptr<ScopedMemoryInit> memory_;
std_support::vector<std_support::unique_ptr<StackObjectHolder>> stackRoots_; std::vector<std::unique_ptr<StackObjectHolder>> stackRoots_;
std_support::vector<std_support::unique_ptr<GlobalObjectHolder>> globalRoots_; std::vector<std::unique_ptr<GlobalObjectHolder>> globalRoots_;
Context() : memory_(std_support::make_unique<ScopedMemoryInit>()) { Context() : memory_(std::make_unique<ScopedMemoryInit>()) {
// SingleThreadExecutor must work in the runnable state, so that GC does not collect between tasks. // SingleThreadExecutor must work in the runnable state, so that GC does not collect between tasks.
AssertThreadState(memory_->memoryState(), ThreadState::kRunnable); AssertThreadState(memory_->memoryState(), ThreadState::kRunnable);
} }
@@ -691,10 +691,10 @@ private:
} // namespace } // namespace
TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsCollect) { TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsCollect) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(kDefaultThreadCount); std::vector<ObjHeader*> globals(kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(kDefaultThreadCount); std::vector<ObjHeader*> locals(kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(kDefaultThreadCount); std::vector<ObjHeader*> reachables(kDefaultThreadCount);
auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = mutator.AddGlobalRoot(); auto& global = mutator.AddGlobalRoot();
@@ -713,7 +713,7 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsCollect) {
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
for (auto& mutator : mutators) { for (auto& mutator : mutators) {
@@ -731,7 +731,7 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsCollect) {
future.wait(); future.wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (auto& global : globals) { for (auto& global : globals) {
expectedAlive.push_back(global); expectedAlive.push_back(global);
} }
@@ -748,10 +748,10 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsCollect) {
} }
TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAllCollect) { TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAllCollect) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(kDefaultThreadCount); std::vector<ObjHeader*> globals(kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(kDefaultThreadCount); std::vector<ObjHeader*> locals(kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(kDefaultThreadCount); std::vector<ObjHeader*> reachables(kDefaultThreadCount);
auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = mutator.AddGlobalRoot(); auto& global = mutator.AddGlobalRoot();
@@ -770,7 +770,7 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAllCollect) {
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
for (auto& mutator : mutators) { for (auto& mutator : mutators) {
gcFutures.emplace_back(mutator.Execute([](mm::ThreadData& threadData, Mutator& mutator) { gcFutures.emplace_back(mutator.Execute([](mm::ThreadData& threadData, Mutator& mutator) {
@@ -793,7 +793,7 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAllCollect) {
.wait(); .wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (auto& global : globals) { for (auto& global : globals) {
expectedAlive.push_back(global); expectedAlive.push_back(global);
} }
@@ -810,10 +810,10 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAllCollect) {
} }
TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRequested) { TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRequested) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(kDefaultThreadCount); std::vector<ObjHeader*> globals(kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(kDefaultThreadCount); std::vector<ObjHeader*> locals(kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(kDefaultThreadCount); std::vector<ObjHeader*> reachables(kDefaultThreadCount);
auto allocateInHeap = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto allocateInHeap = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = AllocateObject(threadData); auto& global = AllocateObject(threadData);
@@ -840,7 +840,7 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRe
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
auto epoch = mm::GlobalData::Instance().gc().Schedule(); auto epoch = mm::GlobalData::Instance().gc().Schedule();
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
@@ -864,7 +864,7 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRe
future.wait(); future.wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (auto& global : globals) { for (auto& global : globals) {
expectedAlive.push_back(global); expectedAlive.push_back(global);
} }
@@ -881,10 +881,10 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsAddToRootSetAfterCollectionRe
} }
TEST_F(SameThreadMarkAndSweepTest, CrossThreadReference) { TEST_F(SameThreadMarkAndSweepTest, CrossThreadReference) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(kDefaultThreadCount); std::vector<ObjHeader*> globals(kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(kDefaultThreadCount); std::vector<ObjHeader*> locals(kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(2 * kDefaultThreadCount); std::vector<ObjHeader*> reachables(2 * kDefaultThreadCount);
auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto expandRootSet = [&globals, &locals, &reachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = mutator.AddGlobalRoot(); auto& global = mutator.AddGlobalRoot();
@@ -910,7 +910,7 @@ TEST_F(SameThreadMarkAndSweepTest, CrossThreadReference) {
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
for (auto& mutator : mutators) { for (auto& mutator : mutators) {
@@ -928,7 +928,7 @@ TEST_F(SameThreadMarkAndSweepTest, CrossThreadReference) {
future.wait(); future.wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (auto& global : globals) { for (auto& global : globals) {
expectedAlive.push_back(global); expectedAlive.push_back(global);
} }
@@ -948,7 +948,7 @@ TEST_F(SameThreadMarkAndSweepTest, CrossThreadReference) {
} }
TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsWeaks) { TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsWeaks) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
ObjHeader* globalRoot = nullptr; ObjHeader* globalRoot = nullptr;
test_support::RegularWeakReferenceImpl* weak = nullptr; test_support::RegularWeakReferenceImpl* weak = nullptr;
@@ -972,7 +972,7 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsWeaks) {
mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {}).wait(); mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {}).wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
auto epoch = mm::GlobalData::Instance().gc().Schedule(); auto epoch = mm::GlobalData::Instance().gc().Schedule();
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
@@ -1002,14 +1002,14 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsWeaks) {
} }
TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsWeakNewObj) { TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsWeakNewObj) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
// Make sure all mutators are initialized. // Make sure all mutators are initialized.
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {}).wait(); mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {}).wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
auto epoch = mm::GlobalData::Instance().gc().Schedule(); auto epoch = mm::GlobalData::Instance().gc().Schedule();
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
@@ -1048,11 +1048,11 @@ TEST_F(SameThreadMarkAndSweepTest, MultipleMutatorsWeakNewObj) {
} }
TEST_F(SameThreadMarkAndSweepTest, NewThreadsWhileRequestingCollection) { TEST_F(SameThreadMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
std_support::vector<Mutator> mutators(kDefaultThreadCount); std::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(2 * kDefaultThreadCount); std::vector<ObjHeader*> globals(2 * kDefaultThreadCount);
std_support::vector<ObjHeader*> locals(2 * kDefaultThreadCount); std::vector<ObjHeader*> locals(2 * kDefaultThreadCount);
std_support::vector<ObjHeader*> reachables(2 * kDefaultThreadCount); std::vector<ObjHeader*> reachables(2 * kDefaultThreadCount);
std_support::vector<ObjHeader*> unreachables(2 * kDefaultThreadCount); std::vector<ObjHeader*> unreachables(2 * kDefaultThreadCount);
auto expandRootSet = [&globals, &locals, &reachables, &unreachables](mm::ThreadData& threadData, Mutator& mutator, int i) { auto expandRootSet = [&globals, &locals, &reachables, &unreachables](mm::ThreadData& threadData, Mutator& mutator, int i) {
auto& global = mutator.AddGlobalRoot(); auto& global = mutator.AddGlobalRoot();
@@ -1072,7 +1072,7 @@ TEST_F(SameThreadMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
.wait(); .wait();
} }
std_support::vector<std::future<void>> gcFutures; std::vector<std::future<void>> gcFutures;
auto epoch = mm::GlobalData::Instance().gc().Schedule(); auto epoch = mm::GlobalData::Instance().gc().Schedule();
std::atomic<bool> gcDone = false; std::atomic<bool> gcDone = false;
@@ -1081,8 +1081,8 @@ TEST_F(SameThreadMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
} }
// Now start attaching new threads. // Now start attaching new threads.
std_support::vector<Mutator> newMutators(kDefaultThreadCount); std::vector<Mutator> newMutators(kDefaultThreadCount);
std_support::vector<std::future<void>> attachFutures(kDefaultThreadCount); std::vector<std::future<void>> attachFutures(kDefaultThreadCount);
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
attachFutures[i] = newMutators[i].Execute([&gcDone, i, expandRootSet](mm::ThreadData& threadData, Mutator& mutator) { attachFutures[i] = newMutators[i].Execute([&gcDone, i, expandRootSet](mm::ThreadData& threadData, Mutator& mutator) {
@@ -1116,7 +1116,7 @@ TEST_F(SameThreadMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
future.wait(); future.wait();
} }
std_support::vector<ObjHeader*> expectedAlive; std::vector<ObjHeader*> expectedAlive;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
expectedAlive.push_back(globals[i]); expectedAlive.push_back(globals[i]);
expectedAlive.push_back(locals[i]); expectedAlive.push_back(locals[i]);
@@ -1129,7 +1129,7 @@ TEST_F(SameThreadMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
} }
// Force mutators to publish their internal heaps // Force mutators to publish their internal heaps
std_support::vector<std::future<void>> publishFutures; std::vector<std::future<void>> publishFutures;
for (auto& mutator: mutators) { for (auto& mutator: mutators) {
publishFutures.emplace_back( publishFutures.emplace_back(
mutator.Execute([](mm::ThreadData& threadData, Mutator& mutator) { threadData.allocator().prepareForGC(); })); mutator.Execute([](mm::ThreadData& threadData, Mutator& mutator) { threadData.allocator().prepareForGC(); }));
@@ -1148,7 +1148,7 @@ TEST_F(SameThreadMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
TEST_F(SameThreadMarkAndSweepTest, FreeObjectWithFreeWeakReversedOrder) { TEST_F(SameThreadMarkAndSweepTest, FreeObjectWithFreeWeakReversedOrder) {
std_support::vector<Mutator> mutators(2); std::vector<Mutator> mutators(2);
std::atomic<test_support::Object<Payload>*> object1 = nullptr; std::atomic<test_support::Object<Payload>*> object1 = nullptr;
std::atomic<test_support::RegularWeakReferenceImpl*> weak = nullptr; std::atomic<test_support::RegularWeakReferenceImpl*> weak = nullptr;
std::atomic<bool> done = false; std::atomic<bool> done = false;
@@ -16,7 +16,7 @@ gcScheduler::GCScheduler::ThreadData::Impl::Impl(GCScheduler& scheduler, mm::Thr
scheduler_(scheduler.impl().impl()), mutatorAssists_(scheduler_.mutatorAssists(), thread) {} scheduler_(scheduler.impl().impl()), mutatorAssists_(scheduler_.mutatorAssists(), thread) {}
gcScheduler::GCScheduler::ThreadData::ThreadData(gcScheduler::GCScheduler& scheduler, mm::ThreadData& thread) noexcept : gcScheduler::GCScheduler::ThreadData::ThreadData(gcScheduler::GCScheduler& scheduler, mm::ThreadData& thread) noexcept :
impl_(std_support::make_unique<Impl>(scheduler, thread)) {} impl_(std::make_unique<Impl>(scheduler, thread)) {}
gcScheduler::GCScheduler::ThreadData::~ThreadData() = default; gcScheduler::GCScheduler::ThreadData::~ThreadData() = default;
@@ -25,7 +25,7 @@ gcScheduler::GCScheduler::Impl::Impl(gcScheduler::GCSchedulerConfig& config) noe
return mm::GlobalData::Instance().gc().Schedule(); return mm::GlobalData::Instance().gc().Schedule();
}) {} }) {}
gcScheduler::GCScheduler::GCScheduler() noexcept : impl_(std_support::make_unique<Impl>(config_)) {} gcScheduler::GCScheduler::GCScheduler() noexcept : impl_(std::make_unique<Impl>(config_)) {}
gcScheduler::GCScheduler::~GCScheduler() = default; gcScheduler::GCScheduler::~GCScheduler() = default;
@@ -5,6 +5,8 @@
#include "GCSchedulerImpl.hpp" #include "GCSchedulerImpl.hpp"
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -12,7 +14,6 @@
#include "ClockTestSupport.hpp" #include "ClockTestSupport.hpp"
#include "SingleThreadExecutor.hpp" #include "SingleThreadExecutor.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -44,7 +45,7 @@ public:
explicit GCSchedulerDataTestApi(gcScheduler::GCSchedulerConfig& config) : scheduler_(config, scheduleGC_.AsStdFunction()) { explicit GCSchedulerDataTestApi(gcScheduler::GCSchedulerConfig& config) : scheduler_(config, scheduleGC_.AsStdFunction()) {
mutators_.reserve(MutatorCount); mutators_.reserve(MutatorCount);
for (int i = 0; i < MutatorCount; ++i) { for (int i = 0; i < MutatorCount; ++i) {
mutators_.emplace_back(std_support::make_unique<MutatorThread>(scheduler_)); mutators_.emplace_back(std::make_unique<MutatorThread>(scheduler_));
} }
} }
@@ -72,7 +73,7 @@ public:
private: private:
std::atomic<size_t> allocatedBytes_ = 0; std::atomic<size_t> allocatedBytes_ = 0;
std_support::vector<std_support::unique_ptr<MutatorThread>> mutators_; std::vector<std::unique_ptr<MutatorThread>> mutators_;
testing::MockFunction<int64_t()> scheduleGC_; testing::MockFunction<int64_t()> scheduleGC_;
gcScheduler::internal::GCSchedulerDataAdaptive<test_support::manual_clock> scheduler_; gcScheduler::internal::GCSchedulerDataAdaptive<test_support::manual_clock> scheduler_;
}; };
@@ -96,7 +97,7 @@ TEST_F(AdaptiveSchedulerTest, CollectOnTargetHeapReached) {
GCSchedulerDataTestApi<mutatorsCount> schedulerTestApi(config); GCSchedulerDataTestApi<mutatorsCount> schedulerTestApi(config);
EXPECT_CALL(schedulerTestApi.scheduleGC(), Call()).Times(0); EXPECT_CALL(schedulerTestApi.scheduleGC(), Call()).Times(0);
std_support::vector<std::future<void>> futures; std::vector<std::future<void>> futures;
for (int i = 0; i < mutatorsCount; ++i) { for (int i = 0; i < mutatorsCount; ++i) {
futures.push_back(schedulerTestApi.Allocate(i, 9)); futures.push_back(schedulerTestApi.Allocate(i, 9));
} }
@@ -136,7 +137,7 @@ TEST_F(AdaptiveSchedulerTest, CollectOnTargetHeapReachedWithoutAssists) {
GCSchedulerDataTestApi<mutatorsCount> schedulerTestApi(config); GCSchedulerDataTestApi<mutatorsCount> schedulerTestApi(config);
EXPECT_CALL(schedulerTestApi.scheduleGC(), Call()).Times(0); EXPECT_CALL(schedulerTestApi.scheduleGC(), Call()).Times(0);
std_support::vector<std::future<void>> futures; std::vector<std::future<void>> futures;
for (int i = 0; i < mutatorsCount; ++i) { for (int i = 0; i < mutatorsCount; ++i) {
futures.push_back(schedulerTestApi.Allocate(i, 9)); futures.push_back(schedulerTestApi.Allocate(i, 9));
} }
@@ -16,7 +16,7 @@ gcScheduler::GCScheduler::ThreadData::Impl::Impl(GCScheduler& scheduler, mm::Thr
scheduler_(scheduler.impl().impl()), mutatorAssists_(scheduler_.mutatorAssists(), thread) {} scheduler_(scheduler.impl().impl()), mutatorAssists_(scheduler_.mutatorAssists(), thread) {}
gcScheduler::GCScheduler::ThreadData::ThreadData(gcScheduler::GCScheduler& gcScheduler, mm::ThreadData& thread) noexcept : gcScheduler::GCScheduler::ThreadData::ThreadData(gcScheduler::GCScheduler& gcScheduler, mm::ThreadData& thread) noexcept :
impl_(std_support::make_unique<Impl>(gcScheduler, thread)) {} impl_(std::make_unique<Impl>(gcScheduler, thread)) {}
gcScheduler::GCScheduler::ThreadData::~ThreadData() = default; gcScheduler::GCScheduler::ThreadData::~ThreadData() = default;
@@ -25,7 +25,7 @@ gcScheduler::GCScheduler::Impl::Impl(gcScheduler::GCSchedulerConfig& config) noe
return mm::GlobalData::Instance().gc().Schedule(); return mm::GlobalData::Instance().gc().Schedule();
}) {} }) {}
gcScheduler::GCScheduler::GCScheduler() noexcept : impl_(std_support::make_unique<Impl>(config_)) {} gcScheduler::GCScheduler::GCScheduler() noexcept : impl_(std::make_unique<Impl>(config_)) {}
gcScheduler::GCScheduler::~GCScheduler() = default; gcScheduler::GCScheduler::~GCScheduler() = default;
@@ -6,13 +6,13 @@
#pragma once #pragma once
#include <cstddef> #include <cstddef>
#include <unordered_set>
#include "CallsChecker.hpp" #include "CallsChecker.hpp"
#include "KAssert.h" #include "KAssert.h"
#include "Logging.hpp" #include "Logging.hpp"
#include "Mutex.hpp" #include "Mutex.hpp"
#include "StackTrace.hpp" #include "StackTrace.hpp"
#include "std_support/UnorderedSet.hpp"
namespace kotlin::gcScheduler::internal { namespace kotlin::gcScheduler::internal {
@@ -49,7 +49,7 @@ private:
// TODO: Consider replacing mutex + global set with thread local sets sychronized on STW. // TODO: Consider replacing mutex + global set with thread local sets sychronized on STW.
SpinLock<MutexThreadStateHandling::kIgnore> mutex_; SpinLock<MutexThreadStateHandling::kIgnore> mutex_;
std_support::unordered_set<SafePointID> metSafePoints_; std::unordered_set<SafePointID> metSafePoints_;
}; };
} // namespace kotlin::gcScheduler::internal } // namespace kotlin::gcScheduler::internal
@@ -146,7 +146,7 @@ TEST(EpochSchedulerTest, StressScheduleNext) {
}; };
std::atomic<bool> canStop = false; std::atomic<bool> canStop = false;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
threads.emplace_back([&, i] { threads.emplace_back([&, i] {
Epoch pastEpoch = 0; Epoch pastEpoch = 0;
@@ -6,13 +6,13 @@
#pragma once #pragma once
#include <cstddef> #include <cstddef>
#include <memory>
#include <functional> #include <functional>
#include <utility> #include <utility>
#include "GCSchedulerConfig.hpp" #include "GCSchedulerConfig.hpp"
#include "KAssert.h" #include "KAssert.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Memory.hpp"
namespace kotlin::mm { namespace kotlin::mm {
class ThreadData; class ThreadData;
@@ -36,7 +36,7 @@ public:
void safePoint() noexcept; void safePoint() noexcept;
private: private:
std_support::unique_ptr<Impl> impl_; std::unique_ptr<Impl> impl_;
}; };
GCScheduler() noexcept; GCScheduler() noexcept;
@@ -66,7 +66,7 @@ public:
private: private:
GCSchedulerConfig config_; GCSchedulerConfig config_;
std_support::unique_ptr<Impl> impl_; std::unique_ptr<Impl> impl_;
}; };
} // namespace kotlin::gcScheduler } // namespace kotlin::gcScheduler
@@ -5,6 +5,7 @@
#include "MutatorAssists.hpp" #include "MutatorAssists.hpp"
#include <map>
#include <shared_mutex> #include <shared_mutex>
#include <sstream> #include <sstream>
@@ -13,7 +14,6 @@
#include "SafePoint.hpp" #include "SafePoint.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Map.hpp"
using namespace kotlin; using namespace kotlin;
@@ -96,7 +96,7 @@ private:
MutatorAssists assists_; MutatorAssists assists_;
RWSpinLock<MutexThreadStateHandling::kIgnore> mutatorMapMutex_; RWSpinLock<MutexThreadStateHandling::kIgnore> mutatorMapMutex_;
std_support::map<mm::ThreadData*, Mutator*> mutatorMap_; std::map<mm::ThreadData*, Mutator*> mutatorMap_;
}; };
TEST_F(MutatorAssistsTest, EnableSafePointsWhenRequestingAssists) { TEST_F(MutatorAssistsTest, EnableSafePointsWhenRequestingAssists) {
@@ -124,9 +124,9 @@ TEST_F(MutatorAssistsTest, StressEnableSafePointsByMutators) {
std::array<std::atomic<bool>, epochsCount> enabled = {false}; std::array<std::atomic<bool>, epochsCount> enabled = {false};
std::atomic<bool> canStart = false; std::atomic<bool> canStart = false;
std::atomic<bool> canStop = false; std::atomic<bool> canStop = false;
std_support::vector<std_support::unique_ptr<Mutator>> mutators; std::vector<std::unique_ptr<Mutator>> mutators;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators.emplace_back(std_support::make_unique<Mutator>(*this, [&, i](Mutator&) noexcept { mutators.emplace_back(std::make_unique<Mutator>(*this, [&, i](Mutator&) noexcept {
while (!canStart.load(std::memory_order_relaxed)) { while (!canStart.load(std::memory_order_relaxed)) {
std::this_thread::yield(); std::this_thread::yield();
} }
@@ -157,9 +157,9 @@ TEST_F(MutatorAssistsTest, Assist) {
std::array<std::atomic<size_t>, epochsCount> started = {0}; std::array<std::atomic<size_t>, epochsCount> started = {0};
std::array<std::atomic<size_t>, epochsCount> finished = {0}; std::array<std::atomic<size_t>, epochsCount> finished = {0};
std::atomic<Epoch> gcCompleted = 0; std::atomic<Epoch> gcCompleted = 0;
std_support::vector<std_support::unique_ptr<Mutator>> mutators; std::vector<std::unique_ptr<Mutator>> mutators;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators.emplace_back(std_support::make_unique<Mutator>(*this, [&](Mutator&) noexcept { mutators.emplace_back(std::make_unique<Mutator>(*this, [&](Mutator&) noexcept {
for (Epoch epoch = 0; epoch < epochsCount; ++epoch) { for (Epoch epoch = 0; epoch < epochsCount; ++epoch) {
while (!canStart[epoch].load(std::memory_order_relaxed)) { while (!canStart[epoch].load(std::memory_order_relaxed)) {
std::this_thread::yield(); std::this_thread::yield();
@@ -213,9 +213,9 @@ TEST_F(MutatorAssistsTest, AssistNoSync) {
constexpr Epoch epochsCount = 10000; constexpr Epoch epochsCount = 10000;
std::atomic<bool> canStop = false; std::atomic<bool> canStop = false;
std::atomic<size_t> finished = 0; std::atomic<size_t> finished = 0;
std_support::vector<std_support::unique_ptr<Mutator>> mutators; std::vector<std::unique_ptr<Mutator>> mutators;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators.emplace_back(std_support::make_unique<Mutator>(*this, [&](Mutator&) noexcept { mutators.emplace_back(std::make_unique<Mutator>(*this, [&](Mutator&) noexcept {
while (!canStop.load(std::memory_order_relaxed)) { while (!canStop.load(std::memory_order_relaxed)) {
safePoint(); safePoint();
std::this_thread::yield(); std::this_thread::yield();
@@ -247,9 +247,9 @@ TEST_F(MutatorAssistsTest, AssistWithNativeMutators) {
constexpr Epoch epochsCount = 10000; constexpr Epoch epochsCount = 10000;
std::atomic<bool> canStop = false; std::atomic<bool> canStop = false;
std::atomic<size_t> finished = 0; std::atomic<size_t> finished = 0;
std_support::vector<std_support::unique_ptr<Mutator>> mutators; std::vector<std::unique_ptr<Mutator>> mutators;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators.emplace_back(std_support::make_unique<Mutator>(*this, [&, i](Mutator&) noexcept { mutators.emplace_back(std::make_unique<Mutator>(*this, [&, i](Mutator&) noexcept {
if (i % 2 == 0) { if (i % 2 == 0) {
ThreadStateGuard guard(ThreadState::kNative); ThreadStateGuard guard(ThreadState::kNative);
while (!canStop.load(std::memory_order_relaxed)) { while (!canStop.load(std::memory_order_relaxed)) {
@@ -290,9 +290,9 @@ TEST_F(MutatorAssistsTest, AssistNoRequests) {
std::atomic<bool> canStop = false; std::atomic<bool> canStop = false;
std::atomic<size_t> started = 0; std::atomic<size_t> started = 0;
std::atomic<size_t> finished = 0; std::atomic<size_t> finished = 0;
std_support::vector<std_support::unique_ptr<Mutator>> mutators; std::vector<std::unique_ptr<Mutator>> mutators;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators.emplace_back(std_support::make_unique<Mutator>(*this, [&](Mutator&) noexcept { mutators.emplace_back(std::make_unique<Mutator>(*this, [&](Mutator&) noexcept {
while (!canStart.load(std::memory_order_relaxed)) { while (!canStart.load(std::memory_order_relaxed)) {
std::this_thread::yield(); std::this_thread::yield();
} }
@@ -331,9 +331,9 @@ TEST_F(MutatorAssistsTest, AssistRequestsByMutators) {
std::atomic<size_t> started = 0; std::atomic<size_t> started = 0;
std::atomic<size_t> finished = 0; std::atomic<size_t> finished = 0;
std::atomic<Epoch> currentEpoch = 0; std::atomic<Epoch> currentEpoch = 0;
std_support::vector<std_support::unique_ptr<Mutator>> mutators; std::vector<std::unique_ptr<Mutator>> mutators;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators.emplace_back(std_support::make_unique<Mutator>(*this, [&, i](Mutator&) noexcept { mutators.emplace_back(std::make_unique<Mutator>(*this, [&, i](Mutator&) noexcept {
while (!canStart.load(std::memory_order_relaxed)) { while (!canStart.load(std::memory_order_relaxed)) {
std::this_thread::yield(); std::this_thread::yield();
} }
@@ -385,9 +385,9 @@ TEST_F(MutatorAssistsTest, AssistRequestsByMutatorsIntoTheFuture) {
scheduledEpoch = currentEpoch + 1; scheduledEpoch = currentEpoch + 1;
return scheduledEpoch; return scheduledEpoch;
}; };
std_support::vector<std_support::unique_ptr<Mutator>> mutators; std::vector<std::unique_ptr<Mutator>> mutators;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
mutators.emplace_back(std_support::make_unique<Mutator>(*this, [&, i](Mutator&) noexcept { mutators.emplace_back(std::make_unique<Mutator>(*this, [&, i](Mutator&) noexcept {
while (!canStart.load(std::memory_order_relaxed)) { while (!canStart.load(std::memory_order_relaxed)) {
std::this_thread::yield(); std::this_thread::yield();
} }
@@ -12,11 +12,11 @@
using namespace kotlin; using namespace kotlin;
gcScheduler::GCScheduler::ThreadData::ThreadData(gcScheduler::GCScheduler&, mm::ThreadData&) noexcept : gcScheduler::GCScheduler::ThreadData::ThreadData(gcScheduler::GCScheduler&, mm::ThreadData&) noexcept :
impl_(std_support::make_unique<Impl>()) {} impl_(std::make_unique<Impl>()) {}
gcScheduler::GCScheduler::ThreadData::~ThreadData() = default; gcScheduler::GCScheduler::ThreadData::~ThreadData() = default;
gcScheduler::GCScheduler::GCScheduler() noexcept : impl_(std_support::make_unique<Impl>()) {} gcScheduler::GCScheduler::GCScheduler() noexcept : impl_(std::make_unique<Impl>()) {}
gcScheduler::GCScheduler::~GCScheduler() = default; gcScheduler::GCScheduler::~GCScheduler() = default;
@@ -14,13 +14,14 @@
* limitations under the License. * limitations under the License.
*/ */
#include <cstdlib>
#include "KString.h" #include "KString.h"
#include "Memory.h" #include "Memory.h"
#include "Natives.h" #include "Natives.h"
#include "Porting.h" #include "Porting.h"
#include "Runtime.h" #include "Runtime.h"
#include "Types.h" #include "Types.h"
#include "std_support/CStdlib.hpp"
#ifdef KONAN_ANDROID #ifdef KONAN_ANDROID
@@ -218,7 +219,7 @@ extern "C" void RUNTIME_USED Konan_main(
ANativeActivity* activity, void* savedState, size_t savedStateSize) { ANativeActivity* activity, void* savedState, size_t savedStateSize) {
bool launchThread = activity->instance == nullptr; bool launchThread = activity->instance == nullptr;
if (launchThread) { if (launchThread) {
launcherState = (LauncherState*)std_support::calloc(sizeof(LauncherState), 1); launcherState = (LauncherState*)std::calloc(sizeof(LauncherState), 1);
launcherState->nativeActivityState = {activity, savedState, savedStateSize, nullptr}; launcherState->nativeActivityState = {activity, savedState, savedStateSize, nullptr};
activity->instance = launcherState; activity->instance = launcherState;
activity->callbacks->onDestroy = onDestroy; activity->callbacks->onDestroy = onDestroy;
@@ -14,13 +14,14 @@
* limitations under the License. * limitations under the License.
*/ */
#include <cstdlib>
#include "Memory.h" #include "Memory.h"
#include "Natives.h" #include "Natives.h"
#include "Runtime.h" #include "Runtime.h"
#include "KString.h" #include "KString.h"
#include "Types.h" #include "Types.h"
#include "Worker.h" #include "Worker.h"
#include "std_support/CStdlib.hpp"
#include "launcher.h" #include "launcher.h"
@@ -26,6 +26,7 @@
#pragma once #pragma once
#include <atomic> #include <atomic>
#include <optional>
#include "Utils.hpp" #include "Utils.hpp"
#include "ManuallyScoped.hpp" #include "ManuallyScoped.hpp"
@@ -7,12 +7,12 @@
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include <list> #include <list>
#include <vector>
#include "IntrusiveList.hpp" #include "IntrusiveList.hpp"
#include "ParallelProcessor.hpp" #include "ParallelProcessor.hpp"
#include "std_support/Vector.hpp"
#include "SingleThreadExecutor.hpp" #include "SingleThreadExecutor.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
@@ -6,6 +6,7 @@
#include "Cleaner.h" #include "Cleaner.h"
#include <future> #include <future>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -14,7 +15,6 @@
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "TestSupportCompilerGenerated.hpp" #include "TestSupportCompilerGenerated.hpp"
#include "Types.h" #include "Types.h"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
using namespace kotlin::test_support; using namespace kotlin::test_support;
@@ -40,7 +40,7 @@ TEST_F(CleanerTest, ConcurrentCreation) {
int startedThreads = 0; int startedThreads = 0;
bool allowRunning = false; bool allowRunning = false;
std_support::vector<std::future<KInt>> futures; std::vector<std::future<KInt>> futures;
for (int i = 0; i < threadCount; ++i) { for (int i = 0; i < threadCount; ++i) {
auto future = std::async(std::launch::async, [&startedThreads, &allowRunning]() { auto future = std::async(std::launch::async, [&startedThreads, &allowRunning]() {
// Thread state switching requires initilized memory subsystem. // Thread state switching requires initilized memory subsystem.
@@ -55,7 +55,7 @@ TEST_F(CleanerTest, ConcurrentCreation) {
while (atomicGet(&startedThreads) != threadCount) { while (atomicGet(&startedThreads) != threadCount) {
} }
atomicSet(&allowRunning, true); atomicSet(&allowRunning, true);
std_support::vector<KInt> values; std::vector<KInt> values;
for (auto& future : futures) { for (auto& future : futures) {
values.push_back(future.get()); values.push_back(future.get());
} }
@@ -11,6 +11,7 @@
#include <shared_mutex> #include <shared_mutex>
#include <tuple> #include <tuple>
#include <type_traits> #include <type_traits>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -18,7 +19,6 @@
#include "ClockTestSupport.hpp" #include "ClockTestSupport.hpp"
#include "ScopedThread.hpp" #include "ScopedThread.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -1078,7 +1078,7 @@ TEST(ManualClockTest, ConcurrentSleepUntil) {
test_support::manual_clock::reset(); test_support::manual_clock::reset();
constexpr auto threadCount = kDefaultThreadCount; constexpr auto threadCount = kDefaultThreadCount;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std::atomic<bool> run = false; std::atomic<bool> run = false;
std::atomic<int> ready = 0; std::atomic<int> ready = 0;
for (int i = 0; i < threadCount; ++i) { for (int i = 0; i < threadCount; ++i) {
@@ -1103,7 +1103,7 @@ TEST(ManualClockTest, ConcurrentWaits) {
test_support::manual_clock::reset(); test_support::manual_clock::reset();
constexpr auto threadCount = kDefaultThreadCount; constexpr auto threadCount = kDefaultThreadCount;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std::mutex mutex; std::mutex mutex;
std::condition_variable cv; std::condition_variable cv;
std::condition_variable_any cvAny; std::condition_variable_any cvAny;
@@ -14,4 +14,4 @@ std::atomic<test_support::manual_clock::time_point> test_support::manual_clock::
std::mutex test_support::manual_clock::pendingWaitsMutex_; std::mutex test_support::manual_clock::pendingWaitsMutex_;
// static // static
std_support::multiset<test_support::manual_clock::time_point> test_support::manual_clock::pendingWaits_; std::multiset<test_support::manual_clock::time_point> test_support::manual_clock::pendingWaits_;
@@ -8,12 +8,12 @@
#include "Clock.hpp" #include "Clock.hpp"
#include <optional> #include <optional>
#include <set>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "KAssert.h" #include "KAssert.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Set.hpp"
namespace kotlin::test_support { namespace kotlin::test_support {
@@ -89,9 +89,9 @@ private:
private: private:
friend class manual_clock; friend class manual_clock;
explicit PendingWaitRegistration(std_support::multiset<time_point>::iterator it) noexcept : it_(it) {} explicit PendingWaitRegistration(std::multiset<time_point>::iterator it) noexcept : it_(it) {}
std_support::multiset<time_point>::iterator it_; std::multiset<time_point>::iterator it_;
}; };
template <typename Rep, typename Period> template <typename Rep, typename Period>
@@ -104,7 +104,7 @@ private:
static std::atomic<time_point> now_; static std::atomic<time_point> now_;
static std::mutex pendingWaitsMutex_; static std::mutex pendingWaitsMutex_;
static std_support::multiset<time_point> pendingWaits_; static std::multiset<time_point> pendingWaits_;
}; };
} // namespace kotlin::test_support } // namespace kotlin::test_support
@@ -7,13 +7,13 @@
#include <condition_variable> #include <condition_variable>
#include <mutex> #include <mutex>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "ScopedThread.hpp" #include "ScopedThread.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -115,7 +115,7 @@ TYPED_TEST(ConditionVariableTest, WaitAll) {
CVUnderTest cv; CVUnderTest cv;
std::atomic<size_t> waiting = 0; std::atomic<size_t> waiting = 0;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
threads.emplace_back([&] { threads.emplace_back([&] {
std::unique_lock guard(m); std::unique_lock guard(m);
@@ -147,7 +147,7 @@ TYPED_TEST(ConditionVariableTest, WaitAllNotifyUnderLock) {
CVUnderTest cv; CVUnderTest cv;
std::atomic<size_t> waiting = 0; std::atomic<size_t> waiting = 0;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
threads.emplace_back([&] { threads.emplace_back([&] {
std::unique_lock guard(m); std::unique_lock guard(m);
@@ -235,7 +235,7 @@ TYPED_TEST(ConditionVariableTest, WaitPredicateAll) {
CVUnderTest cv; CVUnderTest cv;
std::atomic<size_t> waiting = 0; std::atomic<size_t> waiting = 0;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
threads.emplace_back([&] { threads.emplace_back([&] {
std::unique_lock guard(m); std::unique_lock guard(m);
@@ -266,7 +266,7 @@ TYPED_TEST(ConditionVariableTest, WaitPredicateAllNotifyUnderLock) {
CVUnderTest cv; CVUnderTest cv;
std::atomic<size_t> waiting = 0; std::atomic<size_t> waiting = 0;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
threads.emplace_back([&] { threads.emplace_back([&] {
std::unique_lock guard(m); std::unique_lock guard(m);
@@ -308,7 +308,7 @@ TYPED_TEST(ConditionVariableTest, Checkpoint) {
return epochScheduled; return epochScheduled;
}; };
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std::array<std::atomic<uint64_t>, kDefaultThreadCount> checkpoints = {0}; std::array<std::atomic<uint64_t>, kDefaultThreadCount> checkpoints = {0};
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
threads.emplace_back([&, i] { threads.emplace_back([&, i] {
@@ -14,6 +14,8 @@
* limitations under the License. * limitations under the License.
*/ */
#include <cstdio> #include <cstdio>
#include <string>
#include <vector>
#include "KAssert.h" #include "KAssert.h"
#include "Memory.h" #include "Memory.h"
@@ -25,8 +27,6 @@
#ifdef KONAN_ANDROID #ifdef KONAN_ANDROID
#include "CompilerConstants.hpp" #include "CompilerConstants.hpp"
#endif #endif
#include "std_support/String.hpp"
#include "std_support/Vector.hpp"
#include "utf8.h" #include "utf8.h"
@@ -34,12 +34,12 @@ using namespace kotlin;
namespace { namespace {
std_support::string kStringToUtf8(KString message) { std::string kStringToUtf8(KString message) {
if (message->type_info() != theStringTypeInfo) { if (message->type_info() != theStringTypeInfo) {
ThrowClassCastException(message->obj(), theStringTypeInfo); ThrowClassCastException(message->obj(), theStringTypeInfo);
} }
const KChar* utf16 = CharArrayAddressOfElementAt(message, 0); const KChar* utf16 = CharArrayAddressOfElementAt(message, 0);
std_support::string utf8; std::string utf8;
utf8.reserve(message->count_); utf8.reserve(message->count_);
// Replace incorrect sequences with a default codepoint (see utf8::with_replacement::default_replacement) // Replace incorrect sequences with a default codepoint (see utf8::with_replacement::default_replacement)
utf8::with_replacement::utf16to8(utf16, utf16 + message->count_, back_inserter(utf8)); utf8::with_replacement::utf16to8(utf16, utf16 + message->count_, back_inserter(utf8));
@@ -114,7 +114,7 @@ OBJ_GETTER0(Kotlin_io_Console_readLine) {
} }
OBJ_GETTER0(Kotlin_io_Console_readlnOrNull) { OBJ_GETTER0(Kotlin_io_Console_readlnOrNull) {
std_support::vector<char> data; std::vector<char> data;
data.reserve(16); data.reserve(16);
bool isEOF = false; bool isEOF = false;
bool isError = false; bool isError = false;
@@ -16,7 +16,6 @@
#include "ScopedThread.hpp" #include "ScopedThread.hpp"
#include "TestSupportCompilerGenerated.hpp" #include "TestSupportCompilerGenerated.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Memory.hpp"
using namespace kotlin; using namespace kotlin;
using namespace testing; using namespace testing;
@@ -306,8 +305,8 @@ namespace {
using NativeHandlerMock = NiceMock<MockFunction<void(void)>>; using NativeHandlerMock = NiceMock<MockFunction<void(void)>>;
using OnUnhandledExceptionMock = NiceMock<MockFunction<void(KRef)>>; using OnUnhandledExceptionMock = NiceMock<MockFunction<void(KRef)>>;
std_support::unique_ptr<NativeHandlerMock> gNativeHandlerMock = nullptr; std::unique_ptr<NativeHandlerMock> gNativeHandlerMock = nullptr;
std_support::unique_ptr<test_support::ScopedMockFunction<void(KRef), /* Strict = */ false>> gOnUnhandledExceptionMock = nullptr; std::unique_ptr<test_support::ScopedMockFunction<void(KRef), /* Strict = */ false>> gOnUnhandledExceptionMock = nullptr;
// Google Test's death tests do not fail in case of a failed EXPECT_*/ASSERT_* check in a death statement. // Google Test's death tests do not fail in case of a failed EXPECT_*/ASSERT_* check in a death statement.
// To workaround it, manually check the conditions to be asserted, log all failed conditions and then // To workaround it, manually check the conditions to be asserted, log all failed conditions and then
@@ -323,7 +322,7 @@ void log(const char* message) noexcept {
} }
NativeHandlerMock& setNativeTerminateHandler() noexcept { NativeHandlerMock& setNativeTerminateHandler() noexcept {
gNativeHandlerMock = std_support::make_unique<NativeHandlerMock>(); gNativeHandlerMock = std::make_unique<NativeHandlerMock>();
std::set_terminate([]() { std::set_terminate([]() {
gNativeHandlerMock->Call(); gNativeHandlerMock->Call();
std::abort(); std::abort();
@@ -332,7 +331,7 @@ NativeHandlerMock& setNativeTerminateHandler() noexcept {
} }
OnUnhandledExceptionMock& setKotlinTerminationHandler() noexcept { OnUnhandledExceptionMock& setKotlinTerminationHandler() noexcept {
gOnUnhandledExceptionMock = std_support::make_unique<test_support::ScopedMockFunction<void(KRef), /* Strict = */ false>>( gOnUnhandledExceptionMock = std::make_unique<test_support::ScopedMockFunction<void(KRef), /* Strict = */ false>>(
ScopedKotlin_runUnhandledExceptionHookMock</* Strict = */ false>()); ScopedKotlin_runUnhandledExceptionHookMock</* Strict = */ false>());
SetKonanTerminateHandler(); SetKonanTerminateHandler();
return gOnUnhandledExceptionMock->get(); return gOnUnhandledExceptionMock->get();
@@ -17,13 +17,10 @@
#include "ExecFormat.h" #include "ExecFormat.h"
#include <cstdio> #include <cstdio>
#include <cstdlib>
#include <vector>
#include "Porting.h" #include "Porting.h"
#include "std_support/CStdlib.hpp"
#include "std_support/New.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin;
#if USE_ELF_SYMBOLS #if USE_ELF_SYMBOLS
@@ -64,7 +61,7 @@ struct SymRecord {
char* strtab; char* strtab;
}; };
typedef std_support::vector<SymRecord> SymRecordList; typedef std::vector<SymRecord> SymRecordList;
SymRecordList* symbols = nullptr; SymRecordList* symbols = nullptr;
@@ -82,7 +79,7 @@ Elf_Ehdr* findElfHeader() {
void initSymbols() { void initSymbols() {
RuntimeAssert(symbols == nullptr, "Init twice"); RuntimeAssert(symbols == nullptr, "Init twice");
symbols = new (std_support::kalloc) SymRecordList(); symbols = new SymRecordList();
Elf_Ehdr* ehdr = findElfHeader(); Elf_Ehdr* ehdr = findElfHeader();
if (ehdr == nullptr) return; if (ehdr == nullptr) return;
RuntimeAssert(strncmp((const char*)ehdr->e_ident, ELFMAG, SELFMAG) == 0, "Must be an ELF"); RuntimeAssert(strncmp((const char*)ehdr->e_ident, ELFMAG, SELFMAG) == 0, "Must be an ELF");
@@ -165,10 +162,10 @@ static void* mapModuleFile(HMODULE hModule) {
DWORD bufferLength = 64; DWORD bufferLength = 64;
wchar_t* buffer = nullptr; wchar_t* buffer = nullptr;
for (;;) { for (;;) {
auto newBuffer = (wchar_t*)std_support::calloc(bufferLength, sizeof(wchar_t)); auto newBuffer = (wchar_t*)std::calloc(bufferLength, sizeof(wchar_t));
RuntimeAssert(newBuffer != nullptr, "Out of memory"); RuntimeAssert(newBuffer != nullptr, "Out of memory");
if (buffer != nullptr) { if (buffer != nullptr) {
std_support::free(buffer); std::free(buffer);
} }
buffer = newBuffer; buffer = newBuffer;
@@ -184,7 +181,7 @@ static void* mapModuleFile(HMODULE hModule) {
} }
// Invalid result. // Invalid result.
std_support::free(buffer); std::free(buffer);
return nullptr; return nullptr;
} }
@@ -197,7 +194,7 @@ static void* mapModuleFile(HMODULE hModule) {
/* dwFlagsAndAttributes = */ FILE_ATTRIBUTE_NORMAL, /* dwFlagsAndAttributes = */ FILE_ATTRIBUTE_NORMAL,
/* hTemplateFile = */ nullptr /* hTemplateFile = */ nullptr
); );
std_support::free(buffer); std::free(buffer);
if (hFile == INVALID_HANDLE_VALUE) { if (hFile == INVALID_HANDLE_VALUE) {
// Can't open module file. // Can't open module file.
return nullptr; return nullptr;
@@ -337,7 +334,7 @@ extern "C" bool AddressToSymbol(const void* address, char* resultBuffer, size_t
int rv = GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, int rv = GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCWSTR>(&AddressToSymbol), &hModule); reinterpret_cast<LPCWSTR>(&AddressToSymbol), &hModule);
RuntimeAssert(rv != 0, "GetModuleHandleExW fails"); RuntimeAssert(rv != 0, "GetModuleHandleExW fails");
theExeSymbolTable = new (std_support::kalloc) SymbolTable(hModule); theExeSymbolTable = new SymbolTable(hModule);
} }
return theExeSymbolTable->functionAddressToSymbol(address, resultBuffer, resultBufferSize, resultOffset); return theExeSymbolTable->functionAddressToSymbol(address, resultBuffer, resultBufferSize, resultOffset);
} }
@@ -23,14 +23,13 @@
#include "Memory.h" #include "Memory.h"
#include "MemorySharedRefs.hpp" #include "MemorySharedRefs.hpp"
#include "Types.h" #include "Types.h"
#include "std_support/New.hpp"
using namespace kotlin; using namespace kotlin;
extern "C" { extern "C" {
KNativePtr Kotlin_Interop_createStablePointer(KRef any) { KNativePtr Kotlin_Interop_createStablePointer(KRef any) {
KRefSharedHolder* holder = new (std_support::kalloc) KRefSharedHolder(); KRefSharedHolder* holder = new KRefSharedHolder();
holder->init(any); holder->init(any);
return holder; return holder;
} }
@@ -38,7 +37,7 @@ KNativePtr Kotlin_Interop_createStablePointer(KRef any) {
void Kotlin_Interop_disposeStablePointer(KNativePtr pointer) { void Kotlin_Interop_disposeStablePointer(KNativePtr pointer) {
KRefSharedHolder* holder = reinterpret_cast<KRefSharedHolder*>(pointer); KRefSharedHolder* holder = reinterpret_cast<KRefSharedHolder*>(pointer);
holder->dispose(); holder->dispose();
std_support::kdelete(holder); delete holder;
} }
OBJ_GETTER(Kotlin_Interop_derefStablePointer, KNativePtr pointer) { OBJ_GETTER(Kotlin_Interop_derefStablePointer, KNativePtr pointer) {
@@ -6,13 +6,13 @@
#include "IntrusiveList.hpp" #include "IntrusiveList.hpp"
#include <forward_list> #include <forward_list>
#include <list>
#include <type_traits> #include <type_traits>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/List.hpp"
using namespace kotlin; using namespace kotlin;
@@ -68,8 +68,8 @@ private:
}; };
template <typename List> template <typename List>
std_support::list<typename List::value_type> create(std::initializer_list<int> list) { std::list<typename List::value_type> create(std::initializer_list<int> list) {
std_support::list<typename List::value_type> result; std::list<typename List::value_type> result;
for (auto x : list) { for (auto x : list) {
result.emplace_back(x); result.emplace_back(x);
} }
@@ -15,8 +15,10 @@
*/ */
#include <cstdio> #include <cstdio>
#include <cstdlib>
#include <limits> #include <limits>
#include <string.h> #include <string.h>
#include <string>
#include "KAssert.h" #include "KAssert.h"
#include "Exceptions.h" #include "Exceptions.h"
@@ -25,8 +27,6 @@
#include "KString.h" #include "KString.h"
#include "Porting.h" #include "Porting.h"
#include "Types.h" #include "Types.h"
#include "std_support/CStdlib.hpp"
#include "std_support/String.hpp"
#include "utf8.h" #include "utf8.h"
@@ -36,7 +36,7 @@ using namespace kotlin;
namespace { namespace {
typedef std::back_insert_iterator<std_support::string> KStdStringInserter; typedef std::back_insert_iterator<std::string> KStdStringInserter;
typedef KChar* utf8to16(const char*, const char*, KChar*); typedef KChar* utf8to16(const char*, const char*, KChar*);
typedef KStdStringInserter utf16to8(const KChar*,const KChar*, KStdStringInserter); typedef KStdStringInserter utf16to8(const KChar*,const KChar*, KStdStringInserter);
@@ -62,7 +62,7 @@ template<utf16to8 conversion>
OBJ_GETTER(unsafeUtf16ToUtf8Impl, KString thiz, KInt start, KInt size) { OBJ_GETTER(unsafeUtf16ToUtf8Impl, KString thiz, KInt start, KInt size) {
RuntimeAssert(thiz->type_info() == theStringTypeInfo, "Must use String"); RuntimeAssert(thiz->type_info() == theStringTypeInfo, "Must use String");
const KChar* utf16 = CharArrayAddressOfElementAt(thiz, start); const KChar* utf16 = CharArrayAddressOfElementAt(thiz, start);
std_support::string utf8; std::string utf8;
utf8.reserve(size); utf8.reserve(size);
conversion(utf16, utf16 + size, back_inserter(utf8)); conversion(utf16, utf16 + size, back_inserter(utf8));
ArrayHeader* result = AllocArrayInstance(theByteArrayTypeInfo, utf8.size(), OBJ_RESULT)->array(); ArrayHeader* result = AllocArrayInstance(theByteArrayTypeInfo, utf8.size(), OBJ_RESULT)->array();
@@ -127,16 +127,16 @@ char* CreateCStringFromString(KConstRef kref) {
if (kref == nullptr) return nullptr; if (kref == nullptr) return nullptr;
KString kstring = kref->array(); KString kstring = kref->array();
const KChar* utf16 = CharArrayAddressOfElementAt(kstring, 0); const KChar* utf16 = CharArrayAddressOfElementAt(kstring, 0);
std_support::string utf8; std::string utf8;
utf8.reserve(kstring->count_); utf8.reserve(kstring->count_);
utf8::unchecked::utf16to8(utf16, utf16 + kstring->count_, back_inserter(utf8)); utf8::unchecked::utf16to8(utf16, utf16 + kstring->count_, back_inserter(utf8));
char* result = reinterpret_cast<char*>(std_support::calloc(1, utf8.size() + 1)); char* result = reinterpret_cast<char*>(std::calloc(1, utf8.size() + 1));
::memcpy(result, utf8.c_str(), utf8.size()); ::memcpy(result, utf8.c_str(), utf8.size());
return result; return result;
} }
void DisposeCString(char* cstring) { void DisposeCString(char* cstring) {
if (cstring) std_support::free(cstring); if (cstring) std::free(cstring);
} }
ObjHeader* CreatePermanentStringFromCString(const char* nullTerminatedUTF8) { ObjHeader* CreatePermanentStringFromCString(const char* nullTerminatedUTF8) {
@@ -148,7 +148,7 @@ ObjHeader* CreatePermanentStringFromCString(const char* nullTerminatedUTF8) {
size_t headerSize = alignUp(sizeof(ArrayHeader), alignof(char16_t)); size_t headerSize = alignUp(sizeof(ArrayHeader), alignof(char16_t));
size_t arraySize = headerSize + count * sizeof(char16_t); size_t arraySize = headerSize + count * sizeof(char16_t);
ArrayHeader* header = (ArrayHeader*)std_support::calloc(arraySize, 1); ArrayHeader* header = (ArrayHeader*)std::calloc(arraySize, 1);
header->obj()->typeInfoOrMeta_ = setPointerBits((TypeInfo *)theStringTypeInfo, OBJECT_TAG_PERMANENT_CONTAINER); header->obj()->typeInfoOrMeta_ = setPointerBits((TypeInfo *)theStringTypeInfo, OBJECT_TAG_PERMANENT_CONTAINER);
header->count_ = count; header->count_ = count;
utf8::with_replacement::utf8to16(nullTerminatedUTF8, end, CharArrayAddressOfElementAt(header, 0)); utf8::with_replacement::utf8to16(nullTerminatedUTF8, end, CharArrayAddressOfElementAt(header, 0));
@@ -157,7 +157,7 @@ ObjHeader* CreatePermanentStringFromCString(const char* nullTerminatedUTF8) {
} }
void FreePermanentStringForTests(ArrayHeader* header) { void FreePermanentStringForTests(ArrayHeader* header) {
std_support::free(header); std::free(header);
} }
// String.kt // String.kt
@@ -4,11 +4,13 @@
*/ */
#include "KString.h" #include "KString.h"
#include "Natives.h"
#include <cstdlib>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "std_support/CStdlib.hpp"
#include "Natives.h"
using namespace kotlin; using namespace kotlin;
+10 -10
View File
@@ -7,14 +7,14 @@
#include <array> #include <array>
#include <cinttypes> #include <cinttypes>
#include <map>
#include <optional> #include <optional>
#include <string>
#include "CallsChecker.hpp" #include "CallsChecker.hpp"
#include "Format.h" #include "Format.h"
#include "KAssert.h" #include "KAssert.h"
#include "Porting.h" #include "Porting.h"
#include "std_support/Map.hpp"
#include "std_support/String.hpp"
using namespace kotlin; using namespace kotlin;
@@ -53,9 +53,9 @@ std::optional<logging::Level> ParseLevel(std::string_view levelString) noexcept
return std::nullopt; return std::nullopt;
} }
std_support::map<std_support::string, logging::Level> ParseTagsFilter(std::string_view tagsFilter) noexcept { std::map<std::string, logging::Level> ParseTagsFilter(std::string_view tagsFilter) noexcept {
if (tagsFilter.empty()) return {}; if (tagsFilter.empty()) return {};
std_support::map<std_support::string, logging::Level> result; std::map<std::string, logging::Level> result;
std::string_view rest = tagsFilter; std::string_view rest = tagsFilter;
while (!rest.empty()) { while (!rest.empty()) {
auto tag = ParseTag(rest); auto tag = ParseTag(rest);
@@ -75,7 +75,7 @@ std_support::map<std_support::string, logging::Level> ParseTagsFilter(std::strin
konan::consoleErrorf("'. No logging will be performed\n"); konan::consoleErrorf("'. No logging will be performed\n");
return {}; return {};
} }
result.emplace(std_support::string(tag.value->data(), tag.value->size()), *level); result.emplace(std::string(tag.value->data(), tag.value->size()), *level);
} }
return result; return result;
} }
@@ -100,7 +100,7 @@ public:
private: private:
// TODO: Make it more efficient. // TODO: Make it more efficient.
std_support::map<std_support::string, logging::Level> tagLevelMap_; std::map<std::string, logging::Level> tagLevelMap_;
}; };
class StderrLogger : public logging::internal::Logger { class StderrLogger : public logging::internal::Logger {
@@ -153,12 +153,12 @@ struct DefaultLogContext {
} // namespace } // namespace
std_support::unique_ptr<logging::internal::LogFilter> logging::internal::CreateLogFilter(std::string_view tagsFilter) noexcept { std::unique_ptr<logging::internal::LogFilter> logging::internal::CreateLogFilter(std::string_view tagsFilter) noexcept {
return std_support::make_unique<::LogFilter>(tagsFilter); return std::make_unique<::LogFilter>(tagsFilter);
} }
std_support::unique_ptr<logging::internal::Logger> logging::internal::CreateStderrLogger() noexcept { std::unique_ptr<logging::internal::Logger> logging::internal::CreateStderrLogger() noexcept {
return std_support::make_unique<StderrLogger>(); return std::make_unique<StderrLogger>();
} }
std_support::span<char> logging::internal::FormatLogEntry( std_support::span<char> logging::internal::FormatLogEntry(
@@ -8,11 +8,11 @@
#include <cstdarg> #include <cstdarg>
#include <initializer_list> #include <initializer_list>
#include <memory>
#include <string_view> #include <string_view>
#include "Clock.hpp" #include "Clock.hpp"
#include "CompilerConstants.hpp" #include "CompilerConstants.hpp"
#include "std_support/Memory.hpp"
#include "std_support/Span.hpp" #include "std_support/Span.hpp"
namespace kotlin { namespace kotlin {
@@ -35,7 +35,7 @@ public:
virtual bool Enabled(Level level, std_support::span<const char* const> tags) const noexcept = 0; virtual bool Enabled(Level level, std_support::span<const char* const> tags) const noexcept = 0;
}; };
std_support::unique_ptr<LogFilter> CreateLogFilter(std::string_view tagsFilter) noexcept; std::unique_ptr<LogFilter> CreateLogFilter(std::string_view tagsFilter) noexcept;
class Logger { class Logger {
public: public:
@@ -44,7 +44,7 @@ public:
virtual void Log(Level level, std_support::span<const char* const> tags, std::string_view message) const noexcept = 0; virtual void Log(Level level, std_support::span<const char* const> tags, std::string_view message) const noexcept = 0;
}; };
std_support::unique_ptr<Logger> CreateStderrLogger() noexcept; std::unique_ptr<Logger> CreateStderrLogger() noexcept;
std_support::span<char> FormatLogEntry( std_support::span<char> FormatLogEntry(
std_support::span<char> buffer, std_support::span<char> buffer,
@@ -5,11 +5,11 @@
#include "Logging.hpp" #include "Logging.hpp"
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
using ::testing::_; using ::testing::_;
@@ -44,7 +44,7 @@ public:
} }
private: private:
std_support::unique_ptr<logging::internal::LogFilter> logFilter_; std::unique_ptr<logging::internal::LogFilter> logFilter_;
}; };
class MockLogFilter : public logging::internal::LogFilter { class MockLogFilter : public logging::internal::LogFilter {
@@ -217,7 +217,7 @@ private:
}; };
MATCHER_P(TagsAre, tags, "") { MATCHER_P(TagsAre, tags, "") {
std_support::vector<std::string_view> actualTags; std::vector<std::string_view> actualTags;
for (auto tag : arg) { for (auto tag : arg) {
actualTags.push_back(tag); actualTags.push_back(tag);
} }
@@ -8,27 +8,26 @@
#include <atomic> #include <atomic>
#include <list> #include <list>
#include <memory>
#include <mutex> #include <mutex>
#include "Mutex.hpp" #include "Mutex.hpp"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/List.hpp"
#include "std_support/Memory.hpp"
namespace kotlin { namespace kotlin {
// A queue that is constructed by collecting subqueues from several `Producer`s. // A queue that is constructed by collecting subqueues from several `Producer`s.
template <typename T, typename Mutex, typename Allocator = std_support::allocator<T>> template <typename T, typename Mutex, typename Allocator = std::allocator<T>>
class MultiSourceQueue { class MultiSourceQueue {
// Using `std_support::list` as it allows to implement `Collect` without memory allocations, // Using `std::list` as it allows to implement `Collect` without memory allocations,
// which is important for GC mark phase. // which is important for GC mark phase.
template <typename U> template <typename U>
using List = std_support::list<U, typename std::allocator_traits<Allocator>::template rebind_alloc<U>>; using List = std::list<U, typename std::allocator_traits<Allocator>::template rebind_alloc<U>>;
public: public:
class Producer; class Producer;
// TODO: Consider switching from `std_support::list` to `SingleLockList` to hide the constructor // TODO: Consider switching from `std::list` to `SingleLockList` to hide the constructor
// and to not store the iterator. // and to not store the iterator.
class Node : private Pinned { class Node : private Pinned {
public: public:
@@ -6,6 +6,7 @@
#include "MultiSourceQueue.hpp" #include "MultiSourceQueue.hpp"
#include <atomic> #include <atomic>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -13,7 +14,6 @@
#include "StdAllocatorTestSupport.hpp" #include "StdAllocatorTestSupport.hpp"
#include "ScopedThread.hpp" #include "ScopedThread.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -22,8 +22,8 @@ using ::testing::_;
namespace { namespace {
template <typename T, typename Mutex> template <typename T, typename Mutex>
std_support::vector<T> Collect(MultiSourceQueue<T, Mutex>& queue) { std::vector<T> Collect(MultiSourceQueue<T, Mutex>& queue) {
std_support::vector<T> result; std::vector<T> result;
for (const auto& element : queue.LockForIter()) { for (const auto& element : queue.LockForIter()) {
result.push_back(element); result.push_back(element);
} }
@@ -196,8 +196,8 @@ TEST(MultiSourceQueueTest, ConcurrentPublish) {
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std_support::vector<int> expected; std::vector<int> expected;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
expected.push_back(i); expected.push_back(i);
@@ -225,8 +225,8 @@ TEST(MultiSourceQueueTest, IterWhileConcurrentPublish) {
constexpr int kStartCount = 50; constexpr int kStartCount = 50;
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std_support::vector<int> expectedBefore; std::vector<int> expectedBefore;
std_support::vector<int> expectedAfter; std::vector<int> expectedAfter;
IntQueue::Producer producer(queue); IntQueue::Producer producer(queue);
for (int i = 0; i < kStartCount; ++i) { for (int i = 0; i < kStartCount; ++i) {
expectedBefore.push_back(i); expectedBefore.push_back(i);
@@ -238,7 +238,7 @@ TEST(MultiSourceQueueTest, IterWhileConcurrentPublish) {
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std::atomic<int> startedCount(0); std::atomic<int> startedCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
int j = i + kStartCount; int j = i + kStartCount;
expectedAfter.push_back(j); expectedAfter.push_back(j);
@@ -253,7 +253,7 @@ TEST(MultiSourceQueueTest, IterWhileConcurrentPublish) {
}); });
} }
std_support::vector<int> actualBefore; std::vector<int> actualBefore;
{ {
auto iter = queue.LockForIter(); auto iter = queue.LockForIter();
while (readyCount < kThreadCount) { while (readyCount < kThreadCount) {
@@ -282,7 +282,7 @@ TEST(MultiSourceQueueTest, ConcurrentPublishAndApplyDeletions) {
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std::atomic<int> startedCount(0); std::atomic<int> startedCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
threads.emplace_back([&queue, i, &canStart, &readyCount, &startedCount]() { threads.emplace_back([&queue, i, &canStart, &readyCount, &startedCount]() {
IntQueue::Producer producer(queue); IntQueue::Producer producer(queue);
@@ -10,7 +10,12 @@
#if KONAN_OBJC_INTEROP #if KONAN_OBJC_INTEROP
#include <cstdlib>
#include <map>
#import <mutex> #import <mutex>
#include <string>
#include <unordered_set>
#include <vector>
#import <Foundation/NSObject.h> #import <Foundation/NSObject.h>
#import <Foundation/NSValue.h> #import <Foundation/NSValue.h>
@@ -33,11 +38,6 @@
#import "Mutex.hpp" #import "Mutex.hpp"
#import "Exceptions.h" #import "Exceptions.h"
#import "Natives.h" #import "Natives.h"
#include "std_support/CStdlib.hpp"
#include "std_support/Map.hpp"
#include "std_support/String.hpp"
#include "std_support/UnorderedSet.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -45,7 +45,7 @@ namespace {
template <typename T> template <typename T>
inline T* konanAllocArray(size_t length) { inline T* konanAllocArray(size_t length) {
return reinterpret_cast<T*>(std_support::calloc(length, sizeof(T))); return reinterpret_cast<T*>(std::calloc(length, sizeof(T)));
} }
} }
@@ -563,7 +563,7 @@ static id Kotlin_ObjCExport_refToRetainedObjC_slowpath(ObjHeader* obj) {
return convertToRetained(obj); return convertToRetained(obj);
} }
static void buildITable(TypeInfo* result, const std_support::map<ClassId, std_support::vector<VTableElement>>& interfaceVTables) { static void buildITable(TypeInfo* result, const std::map<ClassId, std::vector<VTableElement>>& interfaceVTables) {
// Check if can use fast optimistic version - check if the size of the itable could be 2^k and <= 32. // Check if can use fast optimistic version - check if the size of the itable could be 2^k and <= 32.
bool useFastITable; bool useFastITable;
int itableSize = 1; int itableSize = 1;
@@ -597,7 +597,7 @@ static void buildITable(TypeInfo* result, const std_support::map<ClassId, std_su
} }
} else { } else {
// Otherwise: conservative version. // Otherwise: conservative version.
// The table will be sorted since we're using std_support::map. // The table will be sorted since we're using std::map.
int index = 0; int index = 0;
for (auto& pair : interfaceVTables) { for (auto& pair : interfaceVTables) {
auto interfaceId = pair.first; auto interfaceId = pair.first;
@@ -623,15 +623,15 @@ static void buildITable(TypeInfo* result, const std_support::map<ClassId, std_su
static const TypeInfo* createTypeInfo( static const TypeInfo* createTypeInfo(
const char* className, const char* className,
const TypeInfo* superType, const TypeInfo* superType,
const std_support::vector<const TypeInfo*>& superInterfaces, const std::vector<const TypeInfo*>& superInterfaces,
const std_support::vector<VTableElement>& vtable, const std::vector<VTableElement>& vtable,
const std_support::map<ClassId, std_support::vector<VTableElement>>& interfaceVTables, const std::map<ClassId, std::vector<VTableElement>>& interfaceVTables,
const InterfaceTableRecord* superItable, const InterfaceTableRecord* superItable,
int superItableSize, int superItableSize,
bool itableEqualsSuper, bool itableEqualsSuper,
const TypeInfo* fieldsInfo const TypeInfo* fieldsInfo
) { ) {
TypeInfo* result = (TypeInfo*)std_support::calloc(1, sizeof(TypeInfo) + vtable.size() * sizeof(void*)); TypeInfo* result = (TypeInfo*)std::calloc(1, sizeof(TypeInfo) + vtable.size() * sizeof(void*));
result->typeInfo_ = result; result->typeInfo_ = result;
result->flags_ = TF_OBJC_DYNAMIC; result->flags_ = TF_OBJC_DYNAMIC;
@@ -656,10 +656,10 @@ static const TypeInfo* createTypeInfo(
result->classId_ = superType->classId_; result->classId_ = superType->classId_;
std_support::vector<const TypeInfo*> implementedInterfaces( std::vector<const TypeInfo*> implementedInterfaces(
superType->implementedInterfaces_, superType->implementedInterfaces_ + superType->implementedInterfacesCount_ superType->implementedInterfaces_, superType->implementedInterfaces_ + superType->implementedInterfacesCount_
); );
std_support::unordered_set<const TypeInfo*> usedInterfaces(implementedInterfaces.begin(), implementedInterfaces.end()); std::unordered_set<const TypeInfo*> usedInterfaces(implementedInterfaces.begin(), implementedInterfaces.end());
for (const TypeInfo* interface : superInterfaces) { for (const TypeInfo* interface : superInterfaces) {
if (usedInterfaces.insert(interface).second) { if (usedInterfaces.insert(interface).second) {
@@ -685,14 +685,14 @@ static const TypeInfo* createTypeInfo(
result->packageName_ = nullptr; result->packageName_ = nullptr;
result->relativeName_ = CreatePermanentStringFromCString(className); result->relativeName_ = CreatePermanentStringFromCString(className);
result->writableInfo_ = (WritableTypeInfo*)std_support::calloc(1, sizeof(WritableTypeInfo)); result->writableInfo_ = (WritableTypeInfo*)std::calloc(1, sizeof(WritableTypeInfo));
for (size_t i = 0; i < vtable.size(); ++i) result->vtable()[i] = vtable[i]; for (size_t i = 0; i < vtable.size(); ++i) result->vtable()[i] = vtable[i];
return result; return result;
} }
static void addDefinedSelectors(Class clazz, std_support::unordered_set<SEL>& result) { static void addDefinedSelectors(Class clazz, std::unordered_set<SEL>& result) {
unsigned int objcMethodCount; unsigned int objcMethodCount;
Method* objcMethods = class_copyMethodList(clazz, &objcMethodCount); Method* objcMethods = class_copyMethodList(clazz, &objcMethodCount);
@@ -703,10 +703,10 @@ static void addDefinedSelectors(Class clazz, std_support::unordered_set<SEL>& re
if (objcMethods != nullptr) free(objcMethods); if (objcMethods != nullptr) free(objcMethods);
} }
static std_support::vector<const TypeInfo*> getProtocolsAsInterfaces(Class clazz) { static std::vector<const TypeInfo*> getProtocolsAsInterfaces(Class clazz) {
std_support::vector<const TypeInfo*> result; std::vector<const TypeInfo*> result;
std_support::unordered_set<Protocol*> handledProtocols; std::unordered_set<Protocol*> handledProtocols;
std_support::vector<Protocol*> protocolsToHandle; std::vector<Protocol*> protocolsToHandle;
{ {
unsigned int protocolCount; unsigned int protocolCount;
@@ -764,7 +764,7 @@ static void throwIfCantBeOverridden(Class clazz, const KotlinToObjCMethodAdapter
static const TypeInfo* createTypeInfo(Class clazz, const TypeInfo* superType, const TypeInfo* fieldsInfo) { static const TypeInfo* createTypeInfo(Class clazz, const TypeInfo* superType, const TypeInfo* fieldsInfo) {
kotlin::NativeOrUnregisteredThreadGuard threadStateGuard(/* reentrant = */ true); kotlin::NativeOrUnregisteredThreadGuard threadStateGuard(/* reentrant = */ true);
std_support::unordered_set<SEL> definedSelectors; std::unordered_set<SEL> definedSelectors;
addDefinedSelectors(clazz, definedSelectors); addDefinedSelectors(clazz, definedSelectors);
const ObjCTypeAdapter* superTypeAdapter = getTypeAdapter(superType); const ObjCTypeAdapter* superTypeAdapter = getTypeAdapter(superType);
@@ -787,7 +787,7 @@ static const TypeInfo* createTypeInfo(Class clazz, const TypeInfo* superType, co
if (superVtable == nullptr) superVtable = superType->vtable(); if (superVtable == nullptr) superVtable = superType->vtable();
std_support::vector<const void*> vtable( std::vector<const void*> vtable(
superVtable, superVtable,
superVtable + superVtableSize superVtable + superVtableSize
); );
@@ -796,7 +796,7 @@ static const TypeInfo* createTypeInfo(Class clazz, const TypeInfo* superType, co
superITable = superType->interfaceTable_; superITable = superType->interfaceTable_;
superITableSize = superType->interfaceTableSize_; superITableSize = superType->interfaceTableSize_;
} }
std_support::map<ClassId, std_support::vector<VTableElement>> interfaceVTables; std::map<ClassId, std::vector<VTableElement>> interfaceVTables;
if (superITable != nullptr) { if (superITable != nullptr) {
int actualItableSize = superITableSize >= 0 ? superITableSize + 1 : -superITableSize; int actualItableSize = superITableSize >= 0 ? superITableSize + 1 : -superITableSize;
for (int i = 0; i < actualItableSize; ++i) { for (int i = 0; i < actualItableSize; ++i) {
@@ -804,16 +804,16 @@ static const TypeInfo* createTypeInfo(Class clazz, const TypeInfo* superType, co
auto interfaceId = record.id; auto interfaceId = record.id;
if (interfaceId == kInvalidInterfaceId) continue; if (interfaceId == kInvalidInterfaceId) continue;
int vtableSize = record.vtableSize; int vtableSize = record.vtableSize;
std_support::vector<VTableElement> interfaceVTable(vtableSize); std::vector<VTableElement> interfaceVTable(vtableSize);
for (int j = 0; j < vtableSize; ++j) for (int j = 0; j < vtableSize; ++j)
interfaceVTable[j] = record.vtable[j]; interfaceVTable[j] = record.vtable[j];
interfaceVTables.emplace(interfaceId, std::move(interfaceVTable)); interfaceVTables.emplace(interfaceId, std::move(interfaceVTable));
} }
} }
std_support::vector<const TypeInfo*> addedInterfaces = getProtocolsAsInterfaces(clazz); std::vector<const TypeInfo*> addedInterfaces = getProtocolsAsInterfaces(clazz);
std_support::vector<const TypeInfo*> supers( std::vector<const TypeInfo*> supers(
superType->implementedInterfaces_, superType->implementedInterfaces_,
superType->implementedInterfaces_ + superType->implementedInterfacesCount_ superType->implementedInterfaces_ + superType->implementedInterfacesCount_
); );
@@ -838,7 +838,7 @@ static const TypeInfo* createTypeInfo(Class clazz, const TypeInfo* superType, co
auto interfaceVTablesIt = interfaceVTables.find(interfaceId); auto interfaceVTablesIt = interfaceVTables.find(interfaceId);
if (interfaceVTablesIt == interfaceVTables.end()) { if (interfaceVTablesIt == interfaceVTables.end()) {
itableEqualsSuper = false; itableEqualsSuper = false;
interfaceVTables.emplace(interfaceId, std_support::vector<VTableElement>(itableSize)); interfaceVTables.emplace(interfaceId, std::vector<VTableElement>(itableSize));
} else { } else {
auto const& interfaceVTable = interfaceVTablesIt->second; auto const& interfaceVTable = interfaceVTablesIt->second;
RuntimeAssert(interfaceVTable.size() == static_cast<size_t>(itableSize), ""); RuntimeAssert(interfaceVTable.size() == static_cast<size_t>(itableSize), "");
@@ -955,7 +955,7 @@ static Class createClass(const TypeInfo* typeInfo, Class superClass) {
kotlin::NativeOrUnregisteredThreadGuard threadStateGuard(/* reentrant = */ true); kotlin::NativeOrUnregisteredThreadGuard threadStateGuard(/* reentrant = */ true);
int classIndex = (anonymousClassNextId++); int classIndex = (anonymousClassNextId++);
std_support::string className = Kotlin_ObjCInterop_getUniquePrefix(); std::string className = Kotlin_ObjCInterop_getUniquePrefix();
className += "_kobjcc"; className += "_kobjcc";
className += std::to_string(classIndex); className += std::to_string(classIndex);
@@ -975,7 +975,7 @@ static Class createClass(const TypeInfo* typeInfo, Class superClass) {
} }
} }
std_support::unordered_set<const TypeInfo*> superImplementedInterfaces( std::unordered_set<const TypeInfo*> superImplementedInterfaces(
typeInfo->superType_->implementedInterfaces_, typeInfo->superType_->implementedInterfaces_,
typeInfo->superType_->implementedInterfaces_ + typeInfo->superType_->implementedInterfacesCount_ typeInfo->superType_->implementedInterfaces_ + typeInfo->superType_->implementedInterfacesCount_
); );
@@ -14,6 +14,7 @@
#include <cstdio> #include <cstdio>
#include <cstdint> #include <cstdint>
#include <mutex> #include <mutex>
#include <string>
#include "Memory.h" #include "Memory.h"
#include "MemorySharedRefs.hpp" #include "MemorySharedRefs.hpp"
@@ -25,7 +26,6 @@
#include "StackTrace.hpp" #include "StackTrace.hpp"
#include "Types.h" #include "Types.h"
#include "Mutex.hpp" #include "Mutex.hpp"
#include "std_support/String.hpp"
using namespace kotlin; using namespace kotlin;
@@ -240,7 +240,7 @@ NO_EXTERNAL_CALLS_CHECK static Class allocateClass(const KotlinObjCClassInfo* in
fprintf(stderr, "Class %s has multiple implementations. Which one will be used is undefined.\n", info->name); fprintf(stderr, "Class %s has multiple implementations. Which one will be used is undefined.\n", info->name);
} }
std_support::string className = Kotlin_ObjCInterop_getUniquePrefix(); std::string className = Kotlin_ObjCInterop_getUniquePrefix();
if (info->name != nullptr) { if (info->name != nullptr) {
className += info->name; className += info->name;
@@ -5,13 +5,13 @@
#include <array> #include <array>
#include <type_traits> #include <type_traits>
#include <vector>
#include "KAssert.h" #include "KAssert.h"
#include "Memory.h" #include "Memory.h"
#include "TypeInfo.h" #include "TypeInfo.h"
#include "Types.h" #include "Types.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Vector.hpp"
namespace kotlin { namespace kotlin {
namespace test_support { namespace test_support {
@@ -27,7 +27,7 @@ private:
virtual ~Builder() = default; virtual ~Builder() = default;
int32_t instanceSize_ = 0; int32_t instanceSize_ = 0;
std_support::vector<int32_t> objOffsets_; std::vector<int32_t> objOffsets_;
int32_t objOffsetsCount_ = 0; int32_t objOffsetsCount_ = 0;
int32_t flags_ = 0; int32_t flags_ = 0;
int32_t instanceAlignment_ = 8; int32_t instanceAlignment_ = 8;
@@ -98,7 +98,7 @@ public:
private: private:
TypeInfo typeInfo_{}; TypeInfo typeInfo_{};
std_support::vector<int32_t> objOffsets_; std::vector<int32_t> objOffsets_;
}; };
class Any : private Pinned { class Any : private Pinned {
@@ -5,12 +5,13 @@
#include "ObjectTestSupport.hpp" #include "ObjectTestSupport.hpp"
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "Natives.h" #include "Natives.h"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -69,8 +70,8 @@ using ObjectTestCases = testing::Types<RegularObjectTestCase, IrregularObjectTes
TYPED_TEST_SUITE(ObjectTestSupportObjectTest, ObjectTestCases, ObjectTestCaseNames); TYPED_TEST_SUITE(ObjectTestSupportObjectTest, ObjectTestCases, ObjectTestCaseNames);
template <typename Payload> template <typename Payload>
std_support::vector<ObjHeader**> Collect(test_support::Object<Payload>& object) { std::vector<ObjHeader**> Collect(test_support::Object<Payload>& object) {
std_support::vector<ObjHeader**> result; std::vector<ObjHeader**> result;
for (auto& field : object.fields()) { for (auto& field : object.fields()) {
result.push_back(&field); result.push_back(&field);
} }
@@ -308,8 +309,8 @@ using ArrayTestCases = testing::Types<
TYPED_TEST_SUITE(ObjectTestSupportArrayTest, ArrayTestCases, ArrayTestCaseNames); TYPED_TEST_SUITE(ObjectTestSupportArrayTest, ArrayTestCases, ArrayTestCaseNames);
template <typename Payload, size_t ElementCount> template <typename Payload, size_t ElementCount>
std_support::vector<Payload*> Collect(test_support::internal::Array<Payload, ElementCount>& array) { std::vector<Payload*> Collect(test_support::internal::Array<Payload, ElementCount>& array) {
std_support::vector<Payload*> result; std::vector<Payload*> result;
for (auto& element : array.elements()) { for (auto& element : array.elements()) {
result.push_back(&element); result.push_back(&element);
} }
@@ -330,7 +331,7 @@ TYPED_TEST(ObjectTestSupportArrayTest, Local) {
EXPECT_THAT(array.arrayHeader()->count_, size); EXPECT_THAT(array.arrayHeader()->count_, size);
EXPECT_THAT(array.elements().size(), size); EXPECT_THAT(array.elements().size(), size);
std_support::vector<Payload*> expected; std::vector<Payload*> expected;
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
auto* element = AddressOfElementAt<Payload>(array.arrayHeader(), i); auto* element = AddressOfElementAt<Payload>(array.arrayHeader(), i);
EXPECT_THAT(&array.elements()[i], element); EXPECT_THAT(&array.elements()[i], element);
@@ -361,7 +362,7 @@ TYPED_TEST(ObjectTestSupportArrayTest, Heap) {
EXPECT_THAT(array.arrayHeader()->count_, size); EXPECT_THAT(array.arrayHeader()->count_, size);
EXPECT_THAT(array.elements().size(), size); EXPECT_THAT(array.elements().size(), size);
std_support::vector<Payload*> expected; std::vector<Payload*> expected;
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
auto* element = AddressOfElementAt<Payload>(array.arrayHeader(), i); auto* element = AddressOfElementAt<Payload>(array.arrayHeader(), i);
EXPECT_THAT(&array.elements()[i], element); EXPECT_THAT(&array.elements()[i], element);
@@ -3,16 +3,15 @@
* that can be found in the LICENSE file. * that can be found in the LICENSE file.
*/ */
#include "ParallelProcessor.hpp"
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include <list> #include <list>
#include <vector>
#include "IntrusiveList.hpp" #include "IntrusiveList.hpp"
#include "ParallelProcessor.hpp"
#include "std_support/Vector.hpp"
#include "SingleThreadExecutor.hpp" #include "SingleThreadExecutor.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
@@ -18,9 +18,9 @@
#include <android/log.h> #include <android/log.h>
#endif #endif
#include <cstdio> #include <cstdio>
#include <cstdlib>
#include <stdarg.h> #include <stdarg.h>
#include <stdint.h> #include <stdint.h>
#include <stdlib.h>
#include <string.h> #include <string.h>
#include <pthread.h> #include <pthread.h>
#include <unistd.h> #include <unistd.h>
@@ -34,7 +34,6 @@
#include "CompilerConstants.hpp" #include "CompilerConstants.hpp"
#include "Porting.h" #include "Porting.h"
#include "KAssert.h" #include "KAssert.h"
#include "std_support/CStdlib.hpp"
using namespace kotlin; using namespace kotlin;
@@ -185,7 +184,7 @@ static void onThreadExitCallback(void* value) {
while (record != nullptr) { while (record != nullptr) {
record->destructor(record->destructorParameter); record->destructor(record->destructorParameter);
auto next = record->next; auto next = record->next;
std_support::free(record); std::free(record);
record = next; record = next;
} }
} }
@@ -213,7 +212,7 @@ static void onThreadExitInit() {
void onThreadExit(void (*destructor)(void*), void* destructorParameter) { void onThreadExit(void (*destructor)(void*), void* destructorParameter) {
// We cannot use pthread_cleanup_push() as it is lexical scope bound. // We cannot use pthread_cleanup_push() as it is lexical scope bound.
pthread_once(&terminationKeyOnceControl, onThreadExitInit); pthread_once(&terminationKeyOnceControl, onThreadExitInit);
DestructorRecord* destructorRecord = (DestructorRecord*)std_support::calloc(1, sizeof(DestructorRecord)); DestructorRecord* destructorRecord = (DestructorRecord*)std::calloc(1, sizeof(DestructorRecord));
destructorRecord->destructor = destructor; destructorRecord->destructor = destructor;
destructorRecord->destructorParameter = destructorParameter; destructorRecord->destructorParameter = destructorParameter;
destructorRecord->next = destructorRecord->next =
@@ -16,7 +16,6 @@
#include "RuntimePrivate.hpp" #include "RuntimePrivate.hpp"
#include "Worker.h" #include "Worker.h"
#include "KString.h" #include "KString.h"
#include "std_support/New.hpp"
#include <atomic> #include <atomic>
#include <cstdlib> #include <cstdlib>
#include <thread> #include <thread>
@@ -86,7 +85,7 @@ volatile GlobalRuntimeStatus globalRuntimeStatus = kGlobalRuntimeUninitialized;
RuntimeState* initRuntime() { RuntimeState* initRuntime() {
SetKonanTerminateHandler(); SetKonanTerminateHandler();
initObjectPool(); initObjectPool();
RuntimeState* result = new (std_support::kalloc) RuntimeState(); RuntimeState* result = new RuntimeState();
if (!result) return kInvalidRuntime; if (!result) return kInvalidRuntime;
RuntimeCheck(!isValidRuntime(), "No active runtimes allowed"); RuntimeCheck(!isValidRuntime(), "No active runtimes allowed");
::runtimeState = result; ::runtimeState = result;
@@ -177,7 +176,7 @@ void deinitRuntime(RuntimeState* state, bool destroyRuntime) {
// Do not use ThreadStateGuard because memoryState will be destroyed during DeinitMemory. // Do not use ThreadStateGuard because memoryState will be destroyed during DeinitMemory.
kotlin::SwitchThreadState(state->memoryState, kotlin::ThreadState::kNative); kotlin::SwitchThreadState(state->memoryState, kotlin::ThreadState::kNative);
DeinitMemory(state->memoryState, destroyRuntime); DeinitMemory(state->memoryState, destroyRuntime);
std_support::kdelete(state); delete state;
WorkerDestroyThreadDataIfNeeded(workerId); WorkerDestroyThreadDataIfNeeded(workerId);
::runtimeState = kInvalidRuntime; ::runtimeState = kInvalidRuntime;
} }
@@ -7,11 +7,11 @@
#include <functional> #include <functional>
#include <optional> #include <optional>
#include <string>
#include <string_view> #include <string_view>
#include <thread> #include <thread>
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/String.hpp"
namespace kotlin { namespace kotlin {
namespace internal { namespace internal {
@@ -44,7 +44,7 @@ public:
private: private:
friend class ScopedThread; friend class ScopedThread;
std::optional<std_support::string> name_; std::optional<std::string> name_;
}; };
ScopedThread() noexcept = default; ScopedThread() noexcept = default;
@@ -19,14 +19,14 @@
namespace kotlin { namespace kotlin {
// TODO: Consider different locking mechanisms. // TODO: Consider different locking mechanisms.
template <typename Value, typename Mutex, typename Allocator = std_support::allocator<Value>> template <typename Value, typename Mutex, typename Allocator = std::allocator<Value>>
class SingleLockList : private Pinned { class SingleLockList : private Pinned {
public: public:
class Node; class Node;
private: private:
using NodeAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<Node>; using NodeAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<Node>;
using NodeOwner = std_support::unique_ptr<Node, std_support::allocator_deleter<Node, NodeAllocator>>; using NodeOwner = std::unique_ptr<Node, std_support::allocator_deleter<Node, NodeAllocator>>;
public: public:
// TODO: Maybe just hide `Node` altogether? // TODO: Maybe just hide `Node` altogether?
@@ -6,7 +6,9 @@
#include "SingleLockList.hpp" #include "SingleLockList.hpp"
#include <atomic> #include <atomic>
#include <deque>
#include <functional> #include <functional>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -14,8 +16,6 @@
#include "StdAllocatorTestSupport.hpp" #include "StdAllocatorTestSupport.hpp"
#include "ScopedThread.hpp" #include "ScopedThread.hpp"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Deque.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -52,7 +52,7 @@ TEST(SingleLockListTest, EmplaceAndIter) {
list.Emplace(kSecond); list.Emplace(kSecond);
list.Emplace(kThird); list.Emplace(kThird);
std_support::vector<int> actual; std::vector<int> actual;
for (int element : list.LockForIter()) { for (int element : list.LockForIter()) {
actual.push_back(element); actual.push_back(element);
} }
@@ -70,7 +70,7 @@ TEST(SingleLockListTest, EmplaceEraseAndIter) {
list.Emplace(kThird); list.Emplace(kThird);
list.Erase(secondNode); list.Erase(secondNode);
std_support::vector<int> actual; std::vector<int> actual;
for (int element : list.LockForIter()) { for (int element : list.LockForIter()) {
actual.push_back(element); actual.push_back(element);
} }
@@ -81,7 +81,7 @@ TEST(SingleLockListTest, EmplaceEraseAndIter) {
TEST(SingleLockListTest, IterEmpty) { TEST(SingleLockListTest, IterEmpty) {
IntList list; IntList list;
std_support::vector<int> actual; std::vector<int> actual;
for (int element : list.LockForIter()) { for (int element : list.LockForIter()) {
actual.push_back(element); actual.push_back(element);
} }
@@ -102,7 +102,7 @@ TEST(SingleLockListTest, EraseToEmptyEmplaceAndIter) {
list.Emplace(kThird); list.Emplace(kThird);
list.Emplace(kFourth); list.Emplace(kFourth);
std_support::vector<int> actual; std::vector<int> actual;
for (int element : list.LockForIter()) { for (int element : list.LockForIter()) {
actual.push_back(element); actual.push_back(element);
} }
@@ -115,8 +115,8 @@ TEST(SingleLockListTest, ConcurrentEmplace) {
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std_support::vector<int> expected; std::vector<int> expected;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
expected.push_back(i); expected.push_back(i);
threads.emplace_back([i, &list, &canStart, &readyCount]() { threads.emplace_back([i, &list, &canStart, &readyCount]() {
@@ -132,7 +132,7 @@ TEST(SingleLockListTest, ConcurrentEmplace) {
canStart = true; canStart = true;
threads.clear(); threads.clear();
std_support::vector<int> actual; std::vector<int> actual;
for (int element : list.LockForIter()) { for (int element : list.LockForIter()) {
actual.push_back(element); actual.push_back(element);
} }
@@ -143,14 +143,14 @@ TEST(SingleLockListTest, ConcurrentEmplace) {
TEST(SingleLockListTest, ConcurrentErase) { TEST(SingleLockListTest, ConcurrentErase) {
IntList list; IntList list;
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std_support::vector<IntList::Node*> items; std::vector<IntList::Node*> items;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
items.push_back(list.Emplace(i)); items.push_back(list.Emplace(i));
} }
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> readyCount(0); std::atomic<int> readyCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (auto* item : items) { for (auto* item : items) {
threads.emplace_back([item, &list, &canStart, &readyCount]() { threads.emplace_back([item, &list, &canStart, &readyCount]() {
++readyCount; ++readyCount;
@@ -165,7 +165,7 @@ TEST(SingleLockListTest, ConcurrentErase) {
canStart = true; canStart = true;
threads.clear(); threads.clear();
std_support::vector<int> actual; std::vector<int> actual;
for (int element : list.LockForIter()) { for (int element : list.LockForIter()) {
actual.push_back(element); actual.push_back(element);
} }
@@ -178,8 +178,8 @@ TEST(SingleLockListTest, IterWhileConcurrentEmplace) {
constexpr int kStartCount = 50; constexpr int kStartCount = 50;
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std_support::deque<int> expectedBefore; std::deque<int> expectedBefore;
std_support::vector<int> expectedAfter; std::vector<int> expectedAfter;
for (int i = 0; i < kStartCount; ++i) { for (int i = 0; i < kStartCount; ++i) {
expectedBefore.push_front(i); expectedBefore.push_front(i);
expectedAfter.push_back(i); expectedAfter.push_back(i);
@@ -188,7 +188,7 @@ TEST(SingleLockListTest, IterWhileConcurrentEmplace) {
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> startedCount(0); std::atomic<int> startedCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
int j = i + kStartCount; int j = i + kStartCount;
expectedAfter.push_back(j); expectedAfter.push_back(j);
@@ -200,7 +200,7 @@ TEST(SingleLockListTest, IterWhileConcurrentEmplace) {
}); });
} }
std_support::vector<int> actualBefore; std::vector<int> actualBefore;
{ {
auto iter = list.LockForIter(); auto iter = list.LockForIter();
canStart = true; canStart = true;
@@ -216,7 +216,7 @@ TEST(SingleLockListTest, IterWhileConcurrentEmplace) {
EXPECT_THAT(actualBefore, testing::ElementsAreArray(expectedBefore)); EXPECT_THAT(actualBefore, testing::ElementsAreArray(expectedBefore));
std_support::vector<int> actualAfter; std::vector<int> actualAfter;
for (int element : list.LockForIter()) { for (int element : list.LockForIter()) {
actualAfter.push_back(element); actualAfter.push_back(element);
} }
@@ -228,8 +228,8 @@ TEST(SingleLockListTest, IterWhileConcurrentErase) {
IntList list; IntList list;
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std_support::deque<int> expectedBefore; std::deque<int> expectedBefore;
std_support::vector<IntList::Node*> items; std::vector<IntList::Node*> items;
for (int i = 0; i < kThreadCount; ++i) { for (int i = 0; i < kThreadCount; ++i) {
expectedBefore.push_front(i); expectedBefore.push_front(i);
items.push_back(list.Emplace(i)); items.push_back(list.Emplace(i));
@@ -237,7 +237,7 @@ TEST(SingleLockListTest, IterWhileConcurrentErase) {
std::atomic<bool> canStart(false); std::atomic<bool> canStart(false);
std::atomic<int> startedCount(0); std::atomic<int> startedCount(0);
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (auto* item : items) { for (auto* item : items) {
threads.emplace_back([item, &list, &canStart, &startedCount]() { threads.emplace_back([item, &list, &canStart, &startedCount]() {
while (!canStart) { while (!canStart) {
@@ -247,7 +247,7 @@ TEST(SingleLockListTest, IterWhileConcurrentErase) {
}); });
} }
std_support::vector<int> actualBefore; std::vector<int> actualBefore;
{ {
auto iter = list.LockForIter(); auto iter = list.LockForIter();
canStart = true; canStart = true;
@@ -263,7 +263,7 @@ TEST(SingleLockListTest, IterWhileConcurrentErase) {
EXPECT_THAT(actualBefore, testing::ElementsAreArray(expectedBefore)); EXPECT_THAT(actualBefore, testing::ElementsAreArray(expectedBefore));
std_support::vector<int> actualAfter; std::vector<int> actualAfter;
for (int element : list.LockForIter()) { for (int element : list.LockForIter()) {
actualAfter.push_back(element); actualAfter.push_back(element);
} }
@@ -275,10 +275,10 @@ TEST(SingleLockListTest, LockAndEmplace) {
SingleLockList<int, std::recursive_mutex> list; SingleLockList<int, std::recursive_mutex> list;
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std_support::vector<int> actualLocked; std::vector<int> actualLocked;
std_support::vector<int> actualUnlocked; std::vector<int> actualUnlocked;
std_support::vector<int> expectedUnlocked; std::vector<int> expectedUnlocked;
for (int i = 0; i < kThreadCount; i++) { for (int i = 0; i < kThreadCount; i++) {
expectedUnlocked.push_back(i); expectedUnlocked.push_back(i);
} }
@@ -314,11 +314,11 @@ TEST(SingleLockListTest, LockAndErase) {
SingleLockList<int, std::recursive_mutex> list; SingleLockList<int, std::recursive_mutex> list;
constexpr int kThreadCount = kDefaultThreadCount; constexpr int kThreadCount = kDefaultThreadCount;
std_support::vector<SingleLockList<int, std::recursive_mutex>::Node*> items; std::vector<SingleLockList<int, std::recursive_mutex>::Node*> items;
std_support::vector<int> expectedLocked; std::vector<int> expectedLocked;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
std_support::vector<int> actualLocked; std::vector<int> actualLocked;
std_support::vector<int> actualUnlocked; std::vector<int> actualUnlocked;
std::atomic<int> startedCount(0); std::atomic<int> startedCount(0);
for (int i = 0; i < kThreadCount; i++) { for (int i = 0; i < kThreadCount; i++) {
@@ -377,7 +377,7 @@ TEST(SingleLockListTest, PinnedType) {
list.Erase(itemNode); list.Erase(itemNode);
std_support::vector<PinnedType*> actualAfter; std::vector<PinnedType*> actualAfter;
for (auto& element : list.LockForIter()) { for (auto& element : list.LockForIter()) {
actualAfter.push_back(&element); actualAfter.push_back(&element);
} }
@@ -6,6 +6,7 @@
#pragma once #pragma once
#include <condition_variable> #include <condition_variable>
#include <deque>
#include <functional> #include <functional>
#include <future> #include <future>
#include <mutex> #include <mutex>
@@ -13,7 +14,6 @@
#include "ScopedThread.hpp" #include "ScopedThread.hpp"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Deque.hpp"
namespace kotlin { namespace kotlin {
@@ -101,7 +101,7 @@ private:
std::condition_variable workCV_; std::condition_variable workCV_;
std::mutex workMutex_; std::mutex workMutex_;
std_support::deque<std::packaged_task<void()>> queue_; std::deque<std::packaged_task<void()>> queue_;
bool shutdownRequested_ = false; bool shutdownRequested_ = false;
ScopedThread thread_; ScopedThread thread_;
@@ -5,13 +5,14 @@
#include "SingleThreadExecutor.hpp" #include "SingleThreadExecutor.hpp"
#include <memory>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "KAssert.h" #include "KAssert.h"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/Memory.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -62,7 +63,7 @@ TEST(SingleThreadExecutorTest, ContextThreadBound) {
createdContext = &context; createdContext = &context;
createdThread = std::this_thread::get_id(); createdThread = std::this_thread::get_id();
}); });
auto executor = std_support::make_unique<SingleThreadExecutor<PinnedContext>>(); auto executor = std::make_unique<SingleThreadExecutor<PinnedContext>>();
// Make sure context is created. // Make sure context is created.
executor->context(); executor->context();
testing::Mock::VerifyAndClearExpectations(&mocks.ctorMock); testing::Mock::VerifyAndClearExpectations(&mocks.ctorMock);
@@ -129,7 +130,7 @@ TEST(SingleThreadExecutorTest, execute) {
TEST(SingleThreadExecutorTest, DropExecutorWithTasks) { TEST(SingleThreadExecutorTest, DropExecutorWithTasks) {
struct Context {}; struct Context {};
auto executor = std_support::make_unique<SingleThreadExecutor<Context>>(); auto executor = std::make_unique<SingleThreadExecutor<Context>>();
std::mutex taskMutex; std::mutex taskMutex;
testing::StrictMock<testing::MockFunction<void()>> task; testing::StrictMock<testing::MockFunction<void()>> task;
@@ -143,7 +144,7 @@ TEST(SingleThreadExecutorTest, DropExecutorWithTasks) {
auto future = executor->execute(task.AsStdFunction()); auto future = executor->execute(task.AsStdFunction());
while (!taskStarted) {} while (!taskStarted) {}
std_support::vector<std::pair<std::future<void>, bool>> newTasks; std::vector<std::pair<std::future<void>, bool>> newTasks;
constexpr size_t tasksCount = 100; constexpr size_t tasksCount = 100;
for (size_t i = 0; i < tasksCount; ++i) { for (size_t i = 0; i < tasksCount; ++i) {
newTasks.push_back(std::make_pair(executor->execute([&newTasks, i] { newTasks[i].second = true; }), false)); newTasks.push_back(std::make_pair(executor->execute([&newTasks, i] { newTasks[i].second = true; }), false));
@@ -167,14 +168,14 @@ TEST(SingleThreadExecutorTest, DropExecutorWithTasks) {
TEST(SingleThreadExecutorTest, ExecuteFromManyThreads) { TEST(SingleThreadExecutorTest, ExecuteFromManyThreads) {
struct Context { struct Context {
std_support::vector<int> result; std::vector<int> result;
}; };
SingleThreadExecutor<Context> executor; SingleThreadExecutor<Context> executor;
std::atomic_bool canStart = false; std::atomic_bool canStart = false;
std_support::vector<int> expected; std::vector<int> expected;
std_support::vector<ScopedThread> threads; std::vector<ScopedThread> threads;
for (int i = 0; i < kDefaultThreadCount; ++i) { for (int i = 0; i < kDefaultThreadCount; ++i) {
expected.push_back(i); expected.push_back(i);
threads.emplace_back([&, i] { threads.emplace_back([&, i] {
@@ -124,11 +124,11 @@ int getSourceInfo(void* symbol, SourceInfo *result, int result_len) {
// TODO: this implementation is just a hack, e.g. the result is inexact; // TODO: this implementation is just a hack, e.g. the result is inexact;
// however it is better to have an inexact stacktrace than not to have any. // however it is better to have an inexact stacktrace than not to have any.
NO_INLINE std_support::vector<void*> kotlin::internal::GetCurrentStackTrace(size_t skipFrames) noexcept { NO_INLINE std::vector<void*> kotlin::internal::GetCurrentStackTrace(size_t skipFrames) noexcept {
// Skip GetCurrentStackTrace + anything asked by the caller. // Skip GetCurrentStackTrace + anything asked by the caller.
const size_t kSkipFrames = 1 + skipFrames; const size_t kSkipFrames = 1 + skipFrames;
std_support::vector<void*> result; std::vector<void*> result;
#if USE_GCC_UNWIND #if USE_GCC_UNWIND
size_t depth = 0; size_t depth = 0;
_Unwind_Backtrace(depthCountCallback, static_cast<void*>(&depth)); _Unwind_Backtrace(depthCountCallback, static_cast<void*>(&depth));
@@ -257,9 +257,9 @@ KNativePtr adjustAddressForSourceInfo(KNativePtr address) {
KNativePtr adjustAddressForSourceInfo(KNativePtr address) { return address; } KNativePtr adjustAddressForSourceInfo(KNativePtr address) { return address; }
#endif #endif
std_support::vector<std_support::string> kotlin::GetStackTraceStrings(std_support::span<void* const> stackTrace) noexcept { std::vector<std::string> kotlin::GetStackTraceStrings(std_support::span<void* const> stackTrace) noexcept {
size_t size = stackTrace.size(); size_t size = stackTrace.size();
std_support::vector<std_support::string> strings; std::vector<std::string> strings;
strings.reserve(size); strings.reserve(size);
if (size > 0) { if (size > 0) {
SourceInfo buffer[10]; // outside of the loop to avoid calling constructors and destructors each time SourceInfo buffer[10]; // outside of the loop to avoid calling constructors and destructors each time
@@ -6,16 +6,19 @@
#ifndef RUNTIME_STACK_TRACE_H #ifndef RUNTIME_STACK_TRACE_H
#define RUNTIME_STACK_TRACE_H #define RUNTIME_STACK_TRACE_H
#include <algorithm>
#include <string>
#include <vector>
#include "Common.h"
#include "Utils.hpp"
#include "std_support/Span.hpp" #include "std_support/Span.hpp"
#include "Memory.h"
#include "std_support/String.hpp"
#include "std_support/Vector.hpp"
namespace kotlin { namespace kotlin {
namespace internal { namespace internal {
NO_INLINE std_support::vector<void*> GetCurrentStackTrace(size_t skipFrames) noexcept; NO_INLINE std::vector<void*> GetCurrentStackTrace(size_t skipFrames) noexcept;
NO_INLINE size_t GetCurrentStackTrace(size_t skipFrames, std_support::span<void*> buffer) noexcept; NO_INLINE size_t GetCurrentStackTrace(size_t skipFrames, std_support::span<void*> buffer) noexcept;
enum class StackTraceCapacityKind { enum class StackTraceCapacityKind {
@@ -180,18 +183,18 @@ public:
struct TestSupport : private Pinned { struct TestSupport : private Pinned {
static StackTrace constructFrom(std::initializer_list<void*> values) { static StackTrace constructFrom(std::initializer_list<void*> values) {
std_support::vector<void*> traceElements(values); std::vector<void*> traceElements(values);
return StackTrace(std::move(traceElements)); return StackTrace(std::move(traceElements));
} }
}; };
private: private:
explicit StackTrace(std_support::vector<void*>&& buffer) noexcept : buffer_(buffer) {} explicit StackTrace(std::vector<void*>&& buffer) noexcept : buffer_(buffer) {}
std_support::vector<void*> buffer_; std::vector<void*> buffer_;
}; };
std_support::vector<std_support::string> GetStackTraceStrings(std_support::span<void* const> stackTrace) noexcept; std::vector<std::string> GetStackTraceStrings(std_support::span<void* const> stackTrace) noexcept;
// It's not always safe to extract SourceInfo during unhandled exception termination. // It's not always safe to extract SourceInfo during unhandled exception termination.
void DisallowSourceInfo(); void DisallowSourceInfo();
@@ -7,6 +7,7 @@
#include <cstdlib> #include <cstdlib>
#include <signal.h> #include <signal.h>
#include <unordered_set>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -14,9 +15,6 @@
#include "Common.h" #include "Common.h"
#include "Porting.h" #include "Porting.h"
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "std_support/UnorderedSet.hpp"
#include <iostream>
using namespace kotlin; using namespace kotlin;
@@ -189,7 +187,7 @@ TEST(StackTraceTest, StackAllocatedDeepTraceWithEnoughCapacity) {
TEST(StackTraceTest, Iteration) { TEST(StackTraceTest, Iteration) {
auto stackTrace = GetStackTrace2(); auto stackTrace = GetStackTrace2();
std_support::vector<void*> actualAddresses; std::vector<void*> actualAddresses;
for (auto addr : stackTrace) { for (auto addr : stackTrace) {
actualAddresses.push_back(addr); actualAddresses.push_back(addr);
} }
@@ -205,7 +203,7 @@ TEST(StackTraceTest, Iteration) {
TEST(StackTraceTest, StackAllocatedIteration) { TEST(StackTraceTest, StackAllocatedIteration) {
auto stackTrace = GetStackTrace2<2>(); auto stackTrace = GetStackTrace2<2>();
std_support::vector<void*> actualAddresses; std::vector<void*> actualAddresses;
for (auto addr : stackTrace) { for (auto addr : stackTrace) {
actualAddresses.push_back(addr); actualAddresses.push_back(addr);
} }
@@ -221,7 +219,7 @@ TEST(StackTraceTest, StackAllocatedIteration) {
TEST(StackTraceTest, IndexedAccess) { TEST(StackTraceTest, IndexedAccess) {
auto stackTrace = GetStackTrace2(); auto stackTrace = GetStackTrace2();
std_support::vector<void*> actualAddresses; std::vector<void*> actualAddresses;
for (size_t i = 0; i < stackTrace.size(); i++) { for (size_t i = 0; i < stackTrace.size(); i++) {
actualAddresses.push_back(stackTrace[i]); actualAddresses.push_back(stackTrace[i]);
} }
@@ -235,7 +233,7 @@ TEST(StackTraceTest, IndexedAccess) {
TEST(StackTraceTest, StackAllocatedIndexedAccess) { TEST(StackTraceTest, StackAllocatedIndexedAccess) {
auto stackTrace = GetStackTrace2<2>(); auto stackTrace = GetStackTrace2<2>();
std_support::vector<void*> actualAddresses; std::vector<void*> actualAddresses;
for (size_t i = 0; i < stackTrace.size(); i++) { for (size_t i = 0; i < stackTrace.size(); i++) {
actualAddresses.push_back(stackTrace[i]); actualAddresses.push_back(stackTrace[i]);
} }
@@ -336,7 +334,7 @@ TEST(StackTraceTest, StackAllocatedEqualsAndHash) {
TEST(StackTraceTest, StoreInHashSet) { TEST(StackTraceTest, StoreInHashSet) {
constexpr size_t capacity = 10; constexpr size_t capacity = 10;
std_support::unordered_set<StackTrace<capacity>> set; std::unordered_set<StackTrace<capacity>> set;
StackTrace<capacity> empty; StackTrace<capacity> empty;
StackTrace<capacity> trace1 = GetStackTrace1<capacity>(); StackTrace<capacity> trace1 = GetStackTrace1<capacity>();
StackTrace<capacity> trace2 = GetStackTrace2<capacity>(); StackTrace<capacity> trace2 = GetStackTrace2<capacity>();
@@ -361,7 +359,7 @@ TEST(StackTraceTest, StoreInHashSet) {
} }
TEST(StackTraceTest, StackAllocatedStoreInHashSet) { TEST(StackTraceTest, StackAllocatedStoreInHashSet) {
std_support::unordered_set<StackTrace<>> set; std::unordered_set<StackTrace<>> set;
StackTrace<> empty; StackTrace<> empty;
StackTrace<> trace1 = GetStackTrace1(); StackTrace<> trace1 = GetStackTrace1();
StackTrace<> trace2 = GetStackTrace2(); StackTrace<> trace2 = GetStackTrace2();
@@ -5,6 +5,8 @@
#pragma once #pragma once
#include <cstdlib>
#include <map>
#include <mutex> #include <mutex>
#include <optional> #include <optional>
@@ -12,8 +14,6 @@
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/CStdlib.hpp"
#include "std_support/Map.hpp"
namespace kotlin::test_support { namespace kotlin::test_support {
@@ -26,8 +26,8 @@ public:
class SpyAllocatorCore : private Pinned { class SpyAllocatorCore : private Pinned {
public: public:
SpyAllocatorCore() noexcept { SpyAllocatorCore() noexcept {
ON_CALL(*this, allocate(testing::_)).WillByDefault([](std::size_t size) { return std_support::malloc(size); }); ON_CALL(*this, allocate(testing::_)).WillByDefault([](std::size_t size) { return std::malloc(size); });
ON_CALL(*this, deallocate(testing::_, testing::_)).WillByDefault([](void* ptr, std::size_t size) { std_support::free(ptr); }); ON_CALL(*this, deallocate(testing::_, testing::_)).WillByDefault([](void* ptr, std::size_t size) { std::free(ptr); });
} }
MOCK_METHOD(void*, allocate, (std::size_t), (noexcept)); MOCK_METHOD(void*, allocate, (std::size_t), (noexcept));
@@ -11,7 +11,6 @@
#include "Types.h" #include "Types.h"
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Memory.hpp"
namespace kotlin { namespace kotlin {
namespace test_support { namespace test_support {
@@ -35,7 +34,7 @@ public:
explicit ScopedMockFunction(testing::MockFunction<F>** globalMockLocation) : globalMockLocation_(globalMockLocation) { explicit ScopedMockFunction(testing::MockFunction<F>** globalMockLocation) : globalMockLocation_(globalMockLocation) {
RuntimeCheck(globalMockLocation != nullptr, "ScopedMockFunction needs non-null global mock location"); RuntimeCheck(globalMockLocation != nullptr, "ScopedMockFunction needs non-null global mock location");
RuntimeCheck(*globalMockLocation == nullptr, "ScopedMockFunction needs null global mock"); RuntimeCheck(*globalMockLocation == nullptr, "ScopedMockFunction needs null global mock");
mock_ = std_support::make_unique<Mock>(); mock_ = std::make_unique<Mock>();
*globalMockLocation_ = mock_.get(); *globalMockLocation_ = mock_.get();
} }
@@ -70,7 +69,7 @@ public:
private: private:
// Can be null if moved-out of. // Can be null if moved-out of.
testing::MockFunction<F>** globalMockLocation_; testing::MockFunction<F>** globalMockLocation_;
std_support::unique_ptr<Mock> mock_; std::unique_ptr<Mock> mock_;
}; };
template<bool Strict = true> template<bool Strict = true>
+17 -18
View File
@@ -15,8 +15,12 @@
*/ */
#include <cstdlib> #include <cstdlib>
#include <deque>
#include <set>
#include <string.h> #include <string.h>
#include <stdio.h> #include <stdio.h>
#include <unordered_map>
#include <vector>
#include <pthread.h> #include <pthread.h>
#include "PthreadUtils.h" #include "PthreadUtils.h"
@@ -29,11 +33,6 @@
#include "Runtime.h" #include "Runtime.h"
#include "Types.h" #include "Types.h"
#include "Worker.h" #include "Worker.h"
#include "std_support/Deque.hpp"
#include "std_support/New.hpp"
#include "std_support/Set.hpp"
#include "std_support/UnorderedMap.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -129,7 +128,7 @@ struct JobCompare {
// Using multiset instead of regular set, because we compare the jobs only by `whenExecute`. // Using multiset instead of regular set, because we compare the jobs only by `whenExecute`.
// So if `whenExecute` of two different jobs is the same, the jobs are considered equivalent, // So if `whenExecute` of two different jobs is the same, the jobs are considered equivalent,
// and set would simply drop one of them. // and set would simply drop one of them.
typedef std_support::multiset<Job, JobCompare> DelayedJobSet; typedef std::multiset<Job, JobCompare> DelayedJobSet;
} // namespace } // namespace
@@ -196,7 +195,7 @@ class Worker {
KInt id_; KInt id_;
WorkerKind kind_; WorkerKind kind_;
std_support::deque<Job> queue_; std::deque<Job> queue_;
DelayedJobSet delayed_; DelayedJobSet delayed_;
// Stable pointer with worker's name. // Stable pointer with worker's name.
KNativePtr name_; KNativePtr name_;
@@ -364,7 +363,7 @@ class State {
Worker* worker = nullptr; Worker* worker = nullptr;
{ {
Locker locker(&lock_); Locker locker(&lock_);
worker = new (std_support::kalloc) Worker(nextWorkerId(), exceptionHandling, customName, kind); worker = new Worker(nextWorkerId(), exceptionHandling, customName, kind);
if (worker == nullptr) return nullptr; if (worker == nullptr) return nullptr;
workers_[worker->id()] = worker; workers_[worker->id()] = worker;
} }
@@ -397,7 +396,7 @@ class State {
} }
} }
GC_UnregisterWorker(worker); GC_UnregisterWorker(worker);
std_support::kdelete(worker); delete worker;
} }
Future* addJobToWorkerUnlocked( Future* addJobToWorkerUnlocked(
@@ -410,7 +409,7 @@ class State {
if (it == workers_.end()) return nullptr; if (it == workers_.end()) return nullptr;
worker = it->second; worker = it->second;
future = new (std_support::kalloc) Future(nextFutureId()); future = new Future(nextFutureId());
futures_[future->id()] = future; futures_[future->id()] = future;
Job job; Job job;
@@ -512,7 +511,7 @@ class State {
auto it = futures_.find(id); auto it = futures_.find(id);
if (it != futures_.end()) { if (it != futures_.end()) {
futures_.erase(it); futures_.erase(it);
std_support::kdelete(future); delete future;
} }
} }
@@ -586,7 +585,7 @@ class State {
template <typename F> template <typename F>
void waitNativeWorkersTerminationUnlocked(bool checkLeaks, F waitForWorker) { void waitNativeWorkersTerminationUnlocked(bool checkLeaks, F waitForWorker) {
std_support::vector<std::pair<KInt, pthread_t>> workersToWait; std::vector<std::pair<KInt, pthread_t>> workersToWait;
{ {
Locker locker(&lock_); Locker locker(&lock_);
@@ -640,7 +639,7 @@ class State {
} }
OBJ_GETTER0(getActiveWorkers) { OBJ_GETTER0(getActiveWorkers) {
std_support::vector<KInt> workers; std::vector<KInt> workers;
{ {
Locker locker(&lock_); Locker locker(&lock_);
@@ -658,9 +657,9 @@ class State {
private: private:
pthread_mutex_t lock_; pthread_mutex_t lock_;
pthread_cond_t cond_; pthread_cond_t cond_;
std_support::unordered_map<KInt, Future*> futures_; std::unordered_map<KInt, Future*> futures_;
std_support::unordered_map<KInt, Worker*> workers_; std::unordered_map<KInt, Worker*> workers_;
std_support::unordered_map<KInt, pthread_t> terminating_native_workers_; std::unordered_map<KInt, pthread_t> terminating_native_workers_;
KInt currentWorkerId_; KInt currentWorkerId_;
KInt currentFutureId_; KInt currentFutureId_;
KInt currentVersion_; KInt currentVersion_;
@@ -673,11 +672,11 @@ State* theState() {
return state; return state;
} }
State* result = new (std_support::kalloc) State(); State* result = new State();
State* old = __sync_val_compare_and_swap(&state, nullptr, result); State* old = __sync_val_compare_and_swap(&state, nullptr, result);
if (old != nullptr) { if (old != nullptr) {
std_support::kdelete(result); delete result;
// Someone else inited this data. // Someone else inited this data.
return old; return old;
} }
@@ -7,7 +7,6 @@
#include "Memory.h" #include "Memory.h"
#include "MemorySharedRefs.hpp" #include "MemorySharedRefs.hpp"
#include "std_support/New.hpp"
using namespace kotlin; using namespace kotlin;
@@ -29,7 +28,7 @@ RUNTIME_NOTHROW void DisposeWorkerBoundReference(KRef thiz) {
// Can be null if WorkerBoundReference wasn't frozen. // Can be null if WorkerBoundReference wasn't frozen.
if (auto* holder = asWorkerBoundReference(thiz)->holder) { if (auto* holder = asWorkerBoundReference(thiz)->holder) {
holder->dispose(); holder->dispose();
std_support::kdelete(holder); delete holder;
} }
} }
@@ -43,7 +42,7 @@ RUNTIME_NOTHROW void WorkerBoundReferenceFreezeHook(KRef thiz) {
extern "C" { extern "C" {
KNativePtr Kotlin_WorkerBoundReference_create(KRef value) { KNativePtr Kotlin_WorkerBoundReference_create(KRef value) {
auto* holder = new (std_support::kalloc) KRefSharedHolder(); auto* holder = new KRefSharedHolder();
holder->init(value); holder->init(value);
return holder; return holder;
} }
@@ -18,6 +18,7 @@
#include <string.h> #include <string.h>
#include <math.h> #include <math.h>
#include <stdlib.h> #include <stdlib.h>
#include <string>
#include "cbigint.h" #include "cbigint.h"
#include "../Exceptions.h" #include "../Exceptions.h"
@@ -26,8 +27,6 @@
#include "../Porting.h" #include "../Porting.h"
#include "../utf8.h" #include "../utf8.h"
#include "../DoubleConversions.h" #include "../DoubleConversions.h"
#include "../std_support/CStdlib.hpp"
#include "../std_support/String.hpp"
using namespace kotlin; using namespace kotlin;
@@ -179,8 +178,8 @@ static const KDouble tens[] = {
} }
#define ERROR_OCCURED(x) (HIGH_I32_FROM_VAR(x) < 0) #define ERROR_OCCURED(x) (HIGH_I32_FROM_VAR(x) < 0)
#define allocateU64(x, n) if (!((x) = (U_64*) std_support::calloc(1, (n) * sizeof(U_64)))) goto OutOfMemory; #define allocateU64(x, n) if (!((x) = (U_64*) std::calloc(1, (n) * sizeof(U_64)))) goto OutOfMemory;
#define release(r) if ((r)) std_support::free((r)); #define release(r) if ((r)) std::free((r));
/*NB the Number converter methods are synchronized so it is possible to /*NB the Number converter methods are synchronized so it is possible to
*have global data for use by bigIntDigitGenerator */ *have global data for use by bigIntDigitGenerator */
@@ -647,7 +646,7 @@ OutOfMemory:
KDouble Kotlin_native_FloatingPointParser_parseDoubleImpl (KString s, KInt e) KDouble Kotlin_native_FloatingPointParser_parseDoubleImpl (KString s, KInt e)
{ {
const KChar* utf16 = CharArrayAddressOfElementAt(s, 0); const KChar* utf16 = CharArrayAddressOfElementAt(s, 0);
std_support::string utf8; std::string utf8;
utf8.reserve(s->count_); utf8.reserve(s->count_);
try { try {
utf8::utf16to8(utf16, utf16 + s->count_, back_inserter(utf8)); utf8::utf16to8(utf16, utf16 + s->count_, back_inserter(utf8));
@@ -18,6 +18,7 @@
#include <string.h> #include <string.h>
#include <math.h> #include <math.h>
#include <stdlib.h> #include <stdlib.h>
#include <string>
#include "cbigint.h" #include "cbigint.h"
#include "../Exceptions.h" #include "../Exceptions.h"
@@ -25,8 +26,6 @@
#include "../Natives.h" #include "../Natives.h"
#include "../Porting.h" #include "../Porting.h"
#include "../utf8.h" #include "../utf8.h"
#include "../std_support/CStdlib.hpp"
#include "../std_support/String.hpp"
using namespace kotlin; using namespace kotlin;
@@ -121,8 +120,8 @@ static const U_32 tens[] = {
} \ } \
} }
#define allocateU64(x, n) if (!((x) = (U_64*) std_support::calloc(1, (n) * sizeof(U_64)))) goto OutOfMemory; #define allocateU64(x, n) if (!((x) = (U_64*) std::calloc(1, (n) * sizeof(U_64)))) goto OutOfMemory;
#define release(r) if ((r)) std_support::free((r)); #define release(r) if ((r)) std::free((r));
KFloat createFloat(const char *s, KInt e) { KFloat createFloat(const char *s, KInt e) {
/* assumes s is a null terminated string with at least one /* assumes s is a null terminated string with at least one
@@ -546,7 +545,7 @@ extern "C" KFloat
Kotlin_native_FloatingPointParser_parseFloatImpl(KString s, KInt e) Kotlin_native_FloatingPointParser_parseFloatImpl(KString s, KInt e)
{ {
const KChar* utf16 = CharArrayAddressOfElementAt(s, 0); const KChar* utf16 = CharArrayAddressOfElementAt(s, 0);
std_support::string utf8; std::string utf8;
utf8.reserve(s->count_); utf8.reserve(s->count_);
try { try {
utf8::utf16to8(utf16, utf16 + s->count_, back_inserter(utf8)); utf8::utf16to8(utf16, utf16 + s->count_, back_inserter(utf8));
@@ -9,12 +9,11 @@
#import <Foundation/NSNotification.h> #import <Foundation/NSNotification.h>
#import <Foundation/NSString.h> #import <Foundation/NSString.h>
#include <memory>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "std_support/Memory.hpp"
using namespace kotlin; using namespace kotlin;
using testing::_; using testing::_;
@@ -216,7 +215,7 @@ TEST_F(NSNotificationSubscriptionTest, DestroysHandler) {
EXPECT_CALL(destructorHook, Call(_)).Times(0); EXPECT_CALL(destructorHook, Call(_)).Times(0);
auto subscription = auto subscription =
subscribe(name, [withDestructorHook = std_support::make_shared<WithDestructorHook>(destructorHook.AsStdFunction())] {}); subscribe(name, [withDestructorHook = std::make_shared<WithDestructorHook>(destructorHook.AsStdFunction())] {});
post(name); post(name);
testing::Mock::VerifyAndClearExpectations(&destructorHook); testing::Mock::VerifyAndClearExpectations(&destructorHook);
@@ -7,14 +7,13 @@
#include "ObjectPtr.hpp" #include "ObjectPtr.hpp"
#include <functional>
#import <Foundation/NSObject.h> #import <Foundation/NSObject.h>
#include <functional>
#include <memory>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "std_support/Memory.hpp"
#include "Utils.hpp" #include "Utils.hpp"
using namespace kotlin; using namespace kotlin;
@@ -38,7 +37,7 @@ private:
} // namespace } // namespace
@interface WithDestructorHookObjC : NSObject { @interface WithDestructorHookObjC : NSObject {
std_support::unique_ptr<WithDestructorHook> impl_; std::unique_ptr<WithDestructorHook> impl_;
} }
@property(readonly) WithDestructorHook* impl; @property(readonly) WithDestructorHook* impl;
@@ -51,7 +50,7 @@ private:
- (instancetype)initWithDestructorHook:(std::function<DestructorHook>)hook { - (instancetype)initWithDestructorHook:(std::function<DestructorHook>)hook {
if ((self = [super init])) { if ((self = [super init])) {
impl_ = std_support::make_unique<WithDestructorHook>(hook); impl_ = std::make_unique<WithDestructorHook>(hook);
} }
return self; return self;
} }
@@ -5,20 +5,13 @@
#include "std_support/CStdlib.hpp" #include "std_support/CStdlib.hpp"
#include <cstdint>
#include <cstdlib>
#include <mm_malloc.h> #include <mm_malloc.h>
#include <unistd.h>
#include "Alignment.hpp" #include "Alignment.hpp"
#include "KAssert.h" #include "KAssert.h"
using namespace kotlin; using namespace kotlin;
void* std_support::malloc(std::size_t size) noexcept {
return std::malloc(size);
}
void* std_support::aligned_malloc(std::size_t alignment, std::size_t size) noexcept { void* std_support::aligned_malloc(std::size_t alignment, std::size_t size) noexcept {
// Enforcing alignment requirements of std::aligned_alloc. // Enforcing alignment requirements of std::aligned_alloc.
RuntimeAssert(IsValidAlignment(alignment), "Invalid alignment %zu", alignment); RuntimeAssert(IsValidAlignment(alignment), "Invalid alignment %zu", alignment);
@@ -26,18 +19,6 @@ void* std_support::aligned_malloc(std::size_t alignment, std::size_t size) noexc
return ::_mm_malloc(size, alignment); return ::_mm_malloc(size, alignment);
} }
void* std_support::calloc(std::size_t num, std::size_t size) noexcept {
return std::calloc(num, size);
}
void* std_support::realloc(void* ptr, std::size_t size) noexcept {
return std::realloc(ptr, size);
}
void std_support::free(void* ptr) noexcept {
return std::free(ptr);
}
void std_support::aligned_free(void* ptr) noexcept { void std_support::aligned_free(void* ptr) noexcept {
return ::_mm_free(ptr); return ::_mm_free(ptr);
} }
@@ -9,17 +9,11 @@
namespace kotlin::std_support { namespace kotlin::std_support {
void* malloc(std::size_t size) noexcept;
// TODO: Replace with `aligned_alloc` that's compatible with normal `free`. // TODO: Replace with `aligned_alloc` that's compatible with normal `free`.
// Allocate aligned memory. Must be freed with `aligned_free`. // Allocate aligned memory. Must be freed with `aligned_free`.
void* aligned_malloc(std::size_t alignment, std::size_t size) noexcept; void* aligned_malloc(std::size_t alignment, std::size_t size) noexcept;
void* calloc(std::size_t num, std::size_t size) noexcept; // Free memory allocated with `aligned_malloc`.
void* realloc(void* ptr, std::size_t size) noexcept;
void free(void* ptr) noexcept;
// Free memory allocated with aligned_malloc.
void aligned_free(void* ptr) noexcept; void aligned_free(void* ptr) noexcept;
} // namespace kotlin::std_support } // namespace kotlin::std_support
@@ -33,79 +33,6 @@ static_assert(
} // namespace } // namespace
TEST(StdSupportCStdlibTest, Malloc) {
{
Struct* ptr = (Struct*)std_support::malloc(sizeof(Struct));
EXPECT_TRUE(IsAligned(ptr, mallocAlignment));
ptr->x = 123;
EXPECT_THAT(ptr->x, 123);
std_support::free(ptr);
}
{
void* ptr = std_support::malloc(0);
EXPECT_TRUE(IsAligned(ptr, mallocAlignment));
std_support::free(ptr);
}
}
TEST(StdSupportCStdlibTest, Free) {
std_support::free(nullptr);
}
TEST(StdSupportCStdlibTest, Calloc) {
{
Struct* ptr = (Struct*)std_support::calloc(sizeof(Struct), 2);
EXPECT_TRUE(IsAligned(ptr, mallocAlignment));
EXPECT_THAT(ptr[0].x, 0);
EXPECT_THAT(ptr[1].x, 0);
ptr[0].x = 123;
ptr[1].x = 42;
EXPECT_THAT(ptr[0].x, 123);
EXPECT_THAT(ptr[1].x, 42);
std_support::free(ptr);
}
{
void* ptr = std_support::calloc(0, 2);
EXPECT_TRUE(IsAligned(ptr, mallocAlignment));
std_support::free(ptr);
}
{
void* ptr = std_support::calloc(sizeof(Struct), 0);
EXPECT_TRUE(IsAligned(ptr, mallocAlignment));
std_support::free(ptr);
}
}
TEST(StdSupportCStdlibTest, Realloc) {
{
Struct* ptr = (Struct*)std_support::malloc(sizeof(Struct));
ptr->x = 123;
Struct* newPtr = (Struct*)std_support::realloc(ptr, sizeof(Struct) * 3);
EXPECT_TRUE(IsAligned(newPtr, mallocAlignment));
EXPECT_THAT(newPtr[0].x, 123);
newPtr[1].x = 42;
newPtr[2].x = 13;
EXPECT_THAT(newPtr[1].x, 42);
EXPECT_THAT(newPtr[2].x, 13);
std_support::free(newPtr);
}
{
Struct* ptr = (Struct*)std_support::calloc(sizeof(Struct), 2);
EXPECT_TRUE(IsAligned(ptr, mallocAlignment));
ptr[0].x = 123;
ptr[1].x = 42;
EXPECT_THAT(ptr[0].x, 123);
EXPECT_THAT(ptr[1].x, 42);
Struct* newPtr = (Struct*)std_support::realloc(ptr, sizeof(Struct) * 3);
EXPECT_TRUE(IsAligned(newPtr, mallocAlignment));
EXPECT_THAT(newPtr[0].x, 123);
EXPECT_THAT(newPtr[1].x, 42);
newPtr[2].x = 13;
EXPECT_THAT(newPtr[2].x, 13);
std_support::free(newPtr);
}
}
TEST(StdSupportCStdlibTest, AlignedMalloc) { TEST(StdSupportCStdlibTest, AlignedMalloc) {
{ {
Struct* ptr = (Struct*)std_support::aligned_malloc(alignof(Struct), sizeof(Struct)); Struct* ptr = (Struct*)std_support::aligned_malloc(alignof(Struct), sizeof(Struct));
@@ -1,17 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <deque>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
template <typename T, typename Allocator = allocator<T>>
using deque = std::deque<T, Allocator>;
} // namespace kotlin::std_support
@@ -1,17 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <forward_list>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
template <typename T, typename Allocator = allocator<T>>
using forward_list = std::forward_list<T, Allocator>;
} // namespace kotlin::std_support
@@ -1,17 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <list>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
template <typename T, typename Allocator = allocator<T>>
using list = std::list<T, Allocator>;
} // namespace kotlin::std_support
@@ -1,20 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <map>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
template <typename Key, typename T, typename Compare = std::less<Key>, typename Allocator = allocator<std::pair<const Key, T>>>
using map = std::map<Key, T, Compare, Allocator>;
template <typename Key, typename T, typename Compare = std::less<Key>, typename Allocator = allocator<std::pair<const Key, T>>>
using multimap = std::multimap<Key, T, Compare, Allocator>;
} // namespace kotlin::std_support
@@ -9,44 +9,8 @@
#include <memory> #include <memory>
#include <type_traits> #include <type_traits>
#include "std_support/CStdlib.hpp"
namespace kotlin::std_support { namespace kotlin::std_support {
// Default allocator for Kotlin.
// TODO: Consider overriding global operator new and operator delete instead. However, make sure this does
// not extend over to interop.
template <typename T>
struct allocator {
using value_type = T;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
using propagate_on_container_move_assignment = std::true_type;
using is_always_equal = std::true_type;
allocator() noexcept = default;
allocator(const allocator&) noexcept = default;
template <typename U>
allocator(const allocator<U>&) noexcept {}
// TODO: maybe malloc, actually?
T* allocate(std::size_t n) noexcept { return static_cast<T*>(std_support::calloc(n, sizeof(T))); }
void deallocate(T* p, std::size_t n) noexcept { std_support::free(p); }
};
template <typename T, typename U>
bool operator==(const allocator<T>&, const allocator<U>&) noexcept {
return true;
}
template <typename T, typename U>
bool operator!=(const allocator<T>&, const allocator<U>&) noexcept {
return false;
}
template <typename T, typename Allocator, typename... Args> template <typename T, typename Allocator, typename... Args>
T* allocator_new(const Allocator& allocator, Args&&... args) { T* allocator_new(const Allocator& allocator, Args&&... args) {
static_assert(!std::is_array_v<T>, "T cannot be an array"); static_assert(!std::is_array_v<T>, "T cannot be an array");
@@ -113,26 +77,6 @@ auto allocate_unique(const Allocator& allocator, Args&&... args) {
return std::unique_ptr<T, TDeleter>(allocator_new<T>(allocator, std::forward<Args>(args)...), TDeleter(allocator)); return std::unique_ptr<T, TDeleter>(allocator_new<T>(allocator, std::forward<Args>(args)...), TDeleter(allocator));
} }
template <typename T>
using default_delete = allocator_deleter<T, allocator<T>>;
template <typename T, typename Deleter = default_delete<T>>
using unique_ptr = std::unique_ptr<T, Deleter>;
template <typename T, typename... Args>
auto make_unique(Args&&... args) {
static_assert(!std::is_array_v<T>, "T cannot be an array");
return allocate_unique<T>(allocator<T>(), std::forward<Args>(args)...);
}
template <typename T, typename... Args>
auto make_shared(Args&&... args) {
static_assert(!std::is_array_v<T>, "T cannot be an array");
return std::allocate_shared<T>(allocator<T>(), std::forward<Args>(args)...);
}
template <typename T, typename Allocator> template <typename T, typename Allocator>
auto nullptr_unique(const Allocator& allocator = Allocator()) noexcept { auto nullptr_unique(const Allocator& allocator = Allocator()) noexcept {
static_assert(!std::is_array_v<T>, "T cannot be an array"); static_assert(!std::is_array_v<T>, "T cannot be an array");
@@ -88,46 +88,6 @@ MockClass::Mocker* MockClass::Mocker::instance_ = nullptr;
} // namespace } // namespace
TEST(StdSupportMemoryTest, Allocator) {
using Allocator = std_support::allocator<Class>;
using Traits = std::allocator_traits<Allocator>;
Allocator allocator;
Class* ptr = Traits::allocate(allocator, 1);
new (ptr) Class(42);
EXPECT_THAT(ptr->x(), 42);
Traits::deallocate(allocator, ptr, 1);
}
TEST(StdSupportMemoryTest, AllocatorFromWrongClass) {
using WrongClassAllocator = std_support::allocator<EmptyClass>;
WrongClassAllocator base;
using Allocator = typename std::allocator_traits<WrongClassAllocator>::template rebind_alloc<Class>;
using Traits = typename std::allocator_traits<WrongClassAllocator>::template rebind_traits<Class>;
Allocator allocator = Allocator(base);
Class* ptr = Traits::allocate(allocator, 1);
new (ptr) Class(42);
EXPECT_THAT(ptr->x(), 42);
Traits::deallocate(allocator, ptr, 1);
}
TEST(StdSupportMemoryTest, MakeUnique) {
auto ptr = std_support::make_unique<Class>(42);
EXPECT_THAT(ptr->x(), 42);
}
TEST(StdSupportMemoryTest, MakeUniqueThrows) {
EXPECT_THROW(std_support::make_unique<ClassThrows>(42), int);
}
TEST(StdSupportMemoryTest, MakeShared) {
auto ptr = std_support::make_shared<Class>(42);
EXPECT_THAT(ptr->x(), 42);
}
TEST(StdSupportMemoryTest, MakeSharedThrows) {
EXPECT_THROW(std_support::make_shared<ClassThrows>(42), int);
}
TEST(StdSupportMemoryTest, AllocatorNew) { TEST(StdSupportMemoryTest, AllocatorNew) {
testing::StrictMock<test_support::MockAllocatorCore> allocatorCore; testing::StrictMock<test_support::MockAllocatorCore> allocatorCore;
testing::StrictMock<MockClass::Mocker> mocker; testing::StrictMock<MockClass::Mocker> mocker;
@@ -260,56 +220,11 @@ TEST(StdSupportMemoryTest, AllocateUniqueWrongType) {
ptr.reset(); ptr.reset();
} }
template <typename T, typename Allocator>
using UniquePtr = std_support::unique_ptr<T, std_support::allocator_deleter<T, Allocator>>;
TEST(StdSupportMemoryTest, UniquePtrConversions) {
static_assert(std::is_convertible_v<std_support::unique_ptr<DerivedClass>, std_support::unique_ptr<Class>>);
static_assert(!std::is_convertible_v<std_support::unique_ptr<Class>, std_support::unique_ptr<DerivedClass>>);
static_assert(!std::is_convertible_v<std_support::unique_ptr<Class>, std_support::unique_ptr<int>>);
static_assert(!std::is_convertible_v<std_support::unique_ptr<int>, std_support::unique_ptr<Class>>);
static_assert(!std::is_assignable_v<std_support::unique_ptr<DerivedClass>, std_support::unique_ptr<Class>>);
static_assert(std::is_assignable_v<std_support::unique_ptr<Class>, std_support::unique_ptr<DerivedClass>>);
static_assert(!std::is_assignable_v<std_support::unique_ptr<Class>, std_support::unique_ptr<int>>);
static_assert(!std::is_assignable_v<std_support::unique_ptr<int>, std_support::unique_ptr<Class>>);
using AllocatorClass = test_support::Allocator<Class, test_support::MockAllocatorCore>;
using AllocatorDerivedClass = test_support::Allocator<DerivedClass, test_support::MockAllocatorCore>;
using AllocatorInt = test_support::Allocator<int, test_support::MockAllocatorCore>;
static_assert(std::is_convertible_v<UniquePtr<DerivedClass, AllocatorClass>, UniquePtr<Class, AllocatorClass>>);
static_assert(std::is_convertible_v<UniquePtr<DerivedClass, AllocatorDerivedClass>, UniquePtr<Class, AllocatorClass>>);
static_assert(std::is_convertible_v<UniquePtr<DerivedClass, AllocatorClass>, UniquePtr<Class, AllocatorDerivedClass>>);
static_assert(std::is_convertible_v<UniquePtr<DerivedClass, AllocatorDerivedClass>, UniquePtr<Class, AllocatorDerivedClass>>);
static_assert(!std::is_convertible_v<UniquePtr<Class, AllocatorClass>, UniquePtr<DerivedClass, AllocatorClass>>);
static_assert(!std::is_convertible_v<UniquePtr<Class, AllocatorDerivedClass>, UniquePtr<DerivedClass, AllocatorClass>>);
static_assert(!std::is_convertible_v<UniquePtr<Class, AllocatorClass>, UniquePtr<DerivedClass, AllocatorDerivedClass>>);
static_assert(!std::is_convertible_v<UniquePtr<Class, AllocatorDerivedClass>, UniquePtr<DerivedClass, AllocatorDerivedClass>>);
static_assert(!std::is_convertible_v<UniquePtr<Class, AllocatorClass>, UniquePtr<int, AllocatorInt>>);
static_assert(!std::is_convertible_v<UniquePtr<Class, AllocatorDerivedClass>, UniquePtr<int, AllocatorInt>>);
static_assert(!std::is_convertible_v<UniquePtr<int, AllocatorInt>, UniquePtr<Class, AllocatorClass>>);
static_assert(!std::is_convertible_v<UniquePtr<int, AllocatorInt>, UniquePtr<DerivedClass, AllocatorDerivedClass>>);
static_assert(!std::is_assignable_v<UniquePtr<DerivedClass, AllocatorClass>, UniquePtr<Class, AllocatorClass>>);
static_assert(!std::is_assignable_v<UniquePtr<DerivedClass, AllocatorDerivedClass>, UniquePtr<Class, AllocatorClass>>);
static_assert(!std::is_assignable_v<UniquePtr<DerivedClass, AllocatorClass>, UniquePtr<Class, AllocatorDerivedClass>>);
static_assert(!std::is_assignable_v<UniquePtr<DerivedClass, AllocatorDerivedClass>, UniquePtr<Class, AllocatorDerivedClass>>);
static_assert(std::is_assignable_v<UniquePtr<Class, AllocatorClass>, UniquePtr<DerivedClass, AllocatorClass>>);
static_assert(std::is_assignable_v<UniquePtr<Class, AllocatorDerivedClass>, UniquePtr<DerivedClass, AllocatorClass>>);
static_assert(std::is_assignable_v<UniquePtr<Class, AllocatorClass>, UniquePtr<DerivedClass, AllocatorDerivedClass>>);
static_assert(std::is_assignable_v<UniquePtr<Class, AllocatorDerivedClass>, UniquePtr<DerivedClass, AllocatorDerivedClass>>);
static_assert(!std::is_assignable_v<UniquePtr<Class, AllocatorClass>, UniquePtr<int, AllocatorInt>>);
static_assert(!std::is_assignable_v<UniquePtr<Class, AllocatorDerivedClass>, UniquePtr<int, AllocatorInt>>);
static_assert(!std::is_assignable_v<UniquePtr<int, AllocatorInt>, UniquePtr<Class, AllocatorClass>>);
static_assert(!std::is_assignable_v<UniquePtr<int, AllocatorInt>, UniquePtr<DerivedClass, AllocatorDerivedClass>>);
}
TEST(StdSupportMemoryTest, NullptrUnique) { TEST(StdSupportMemoryTest, NullptrUnique) {
testing::StrictMock<test_support::MockAllocatorCore> allocatorCore; testing::StrictMock<test_support::MockAllocatorCore> allocatorCore;
auto allocator = test_support::MakeAllocator<int>(allocatorCore); auto allocator = test_support::MakeAllocator<int>(allocatorCore);
std_support::unique_ptr<int, std_support::allocator_deleter<int, decltype(allocator)>> ptr = std::unique_ptr<int, std_support::allocator_deleter<int, decltype(allocator)>> ptr =
std_support::nullptr_unique<int>(allocator); std_support::nullptr_unique<int>(allocator);
EXPECT_THAT(ptr.get(), nullptr); EXPECT_THAT(ptr.get(), nullptr);
} }
@@ -1,20 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#include "std_support/New.hpp"
#include "std_support/CStdlib.hpp"
using namespace kotlin;
// TODO: Maybe malloc instead of calloc?
void* operator new(std::size_t count, kotlin::std_support::kalloc_t) noexcept {
return std_support::calloc(1, count);
}
void operator delete(void* ptr, kotlin::std_support::kalloc_t) noexcept {
std_support::free(ptr);
}
@@ -1,31 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <new>
namespace kotlin::std_support {
struct kalloc_t {};
inline constexpr kalloc_t kalloc = kotlin::std_support::kalloc_t{};
} // namespace kotlin::std_support
// TODO: Add align_val_t overloads once we make sure all targets support aligned allocation.
// (also requires removing `-fno-aligned-allocation` compiler flag).
void* operator new(std::size_t count, kotlin::std_support::kalloc_t) noexcept;
void operator delete(void* ptr, kotlin::std_support::kalloc_t) noexcept;
namespace kotlin::std_support {
template <typename T>
void kdelete(T* ptr) noexcept {
ptr->~T();
::operator delete(ptr, kalloc);
}
} // namespace kotlin::std_support
@@ -1,45 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#include "std_support/New.hpp"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using namespace kotlin;
namespace {
class Class {
public:
explicit Class(int32_t x = 17) : x_(x) {}
int32_t x() const { return x_; }
private:
int32_t x_;
};
class ClassThrows {
public:
explicit ClassThrows(int32_t x = 17) : x_(x) { throw 13; }
int32_t x() const { return x_; }
private:
int32_t x_;
};
} // namespace
TEST(NewTest, NewDelete) {
Class* ptr = new (std_support::kalloc) Class(42);
EXPECT_THAT(ptr->x(), 42);
std_support::kdelete(ptr);
}
TEST(NewTest, NewThrows) {
EXPECT_THROW(new (std_support::kalloc) ClassThrows(42), int);
}
@@ -12,16 +12,6 @@ Proposals:
Adjustments: Adjustments:
* `CStdlib.hpp` - * `CStdlib.hpp` -
`std_support::malloc`, `std_support::calloc`, `std_support::realloc`, `std_support::free` that use custom allocation scheme,
`std_support::aligned_malloc` and `std_support::aligned_free` as a version of `malloc` and `free` that allows changing alignment. `std_support::aligned_malloc` and `std_support::aligned_free` as a version of `malloc` and `free` that allows changing alignment.
* `Memory.hpp` - * `Memory.hpp` -
`std_support::allocator` using `std_support::calloc`/`std_support::free`,
`std_support::default_delete` that uses `std_support::free`,
`std_support::unique_ptr` that uses `std_support::default_delete`,
`std_support::make_unique` and `std_support::make_shared` that use `std_support::allocator`,
`std_support::nullptr_unique` - `nullptr` replacement for `unique_ptr` that takes an allocator. `std_support::nullptr_unique` - `nullptr` replacement for `unique_ptr` that takes an allocator.
* `New.hpp` -
custom operator `new` with `std_support::kalloc` marker argument that delegates to `std_support` allocator,
`std_support::kdelete` as a replacement for operator `delete` for objects created with custom `new`.
* `Deque.hpp`, `ForwardList.hpp`, `List.hpp`, `Map.hpp`, `Set.hpp`, `String.hpp`, `UnorderedMap.hpp`, `UnorderedSet.hpp`, `Vector.hpp` -
standard containers and `std_support::string` that default to using `std_support::allocator`.
@@ -1,20 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <set>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
template <typename Key, typename Compare = std::less<Key>, typename Allocator = allocator<Key>>
using set = std::set<Key, Compare, Allocator>;
template <typename Key, typename Compare = std::less<Key>, typename Allocator = allocator<Key>>
using multiset = std::multiset<Key, Compare, Allocator>;
} // namespace kotlin::std_support
@@ -1,22 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <string>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
// std::basic_string with the allocator defaulted to `std_support::allocator`
// (from std_support/Memory.hpp); character traits and all string behavior are
// unchanged from the std counterpart.
template <typename CharT, typename Traits = std::char_traits<CharT>, typename Allocator = allocator<CharT>>
using basic_string = std::basic_string<CharT, Traits, Allocator>;
// Convenience aliases mirroring std::string / std::wstring / std::u16string /
// std::u32string, but using the defaulted allocator above.
using string = basic_string<char>;
using wstring = basic_string<wchar_t>;
using u16string = basic_string<char16_t>;
using u32string = basic_string<char32_t>;
} // namespace kotlin::std_support
@@ -1,30 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <unordered_map>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
// std::unordered_map / std::unordered_multimap with the allocator defaulted to
// `std_support::allocator` (from std_support/Memory.hpp). Note the allocator's
// value type is std::pair<const Key, T>, matching the std container's
// node value type; hashing and equality default to the std functors.
template <
        typename Key,
        typename T,
        typename Hash = std::hash<Key>,
        typename KeyEqual = std::equal_to<Key>,
        typename Allocator = allocator<std::pair<const Key, T>>>
using unordered_map = std::unordered_map<Key, T, Hash, KeyEqual, Allocator>;
template <
        typename Key,
        typename T,
        typename Hash = std::hash<Key>,
        typename KeyEqual = std::equal_to<Key>,
        typename Allocator = allocator<std::pair<const Key, T>>>
using unordered_multimap = std::unordered_multimap<Key, T, Hash, KeyEqual, Allocator>;
} // namespace kotlin::std_support
@@ -1,20 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <unordered_set>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
// std::unordered_set / std::unordered_multiset with the allocator defaulted to
// `std_support::allocator` (from std_support/Memory.hpp); hash and key-equality
// functors default to the std ones, all container behavior is unchanged.
template <typename Key, typename Hash = std::hash<Key>, typename KeyEqual = std::equal_to<Key>, typename Allocator = allocator<Key>>
using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator>;
template <typename Key, typename Hash = std::hash<Key>, typename KeyEqual = std::equal_to<Key>, typename Allocator = allocator<Key>>
using unordered_multiset = std::unordered_multiset<Key, Hash, KeyEqual, Allocator>;
} // namespace kotlin::std_support
@@ -1,17 +0,0 @@
/*
* Copyright 2010-2022 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#pragma once
#include <vector>
#include "std_support/Memory.hpp"
namespace kotlin::std_support {
// std::vector with the allocator defaulted to `std_support::allocator`
// (from std_support/Memory.hpp); element access, growth and iteration
// semantics are identical to std::vector.
template <typename T, typename Allocator = allocator<T>>
using vector = std::vector<T, Allocator>;
} // namespace kotlin::std_support
@@ -6,9 +6,9 @@
#pragma once #pragma once
#include <atomic> #include <atomic>
#include <memory>
#include "Utils.hpp" #include "Utils.hpp"
#include "std_support/Memory.hpp"
namespace kotlin::mm { namespace kotlin::mm {
@@ -33,7 +33,7 @@ private:
// TODO: The initial value might be incorrect. // TODO: The initial value might be incorrect.
std::atomic<State> state_ = State::kForeground; std::atomic<State> state_ = State::kForeground;
std_support::unique_ptr<Impl> impl_; std::unique_ptr<Impl> impl_;
}; };
} // namespace kotlin::mm } // namespace kotlin::mm
@@ -34,7 +34,7 @@ mm::AppStateTracking::AppStateTracking() noexcept {
case compiler::AppStateTracking::kDisabled: case compiler::AppStateTracking::kDisabled:
break; break;
case compiler::AppStateTracking::kEnabled: case compiler::AppStateTracking::kEnabled:
impl_ = std_support::make_unique<Impl>([this](State state) noexcept { setState(state); }); impl_ = std::make_unique<Impl>([this](State state) noexcept { setState(state); });
break; break;
} }
} }
@@ -7,6 +7,7 @@
#include <string_view> #include <string_view>
#include <cstring> #include <cstring>
#include <unordered_set>
#include "KAssert.h" #include "KAssert.h"
#include "Memory.h" #include "Memory.h"
@@ -15,7 +16,6 @@
#include "ThreadData.hpp" #include "ThreadData.hpp"
#include "ThreadRegistry.hpp" #include "ThreadRegistry.hpp"
#include "ExecFormat.h" #include "ExecFormat.h"
#include "std_support/UnorderedSet.hpp"
using namespace kotlin; using namespace kotlin;
@@ -313,7 +313,7 @@ public:
~KnownFunctionChecker() = delete; ~KnownFunctionChecker() = delete;
private: private:
std_support::unordered_set<const void*> known_functions_; std::unordered_set<const void*> known_functions_;
std::string_view good_names_copy_[sizeof(Kotlin_callsCheckerGoodFunctionNames) / sizeof(Kotlin_callsCheckerGoodFunctionNames[0])]; std::string_view good_names_copy_[sizeof(Kotlin_callsCheckerGoodFunctionNames) / sizeof(Kotlin_callsCheckerGoodFunctionNames[0])];
}; };
@@ -4,6 +4,7 @@
*/ */
#include <thread> #include <thread>
#include <vector>
#include "gmock/gmock.h" #include "gmock/gmock.h"
#include "gtest/gtest.h" #include "gtest/gtest.h"
@@ -12,7 +13,6 @@
#include "TestSupport.hpp" #include "TestSupport.hpp"
#include "ThreadData.hpp" #include "ThreadData.hpp"
#include "Types.h" #include "Types.h"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -20,9 +20,9 @@ namespace {
class ExceptionObjHolderTest : public ::testing::Test { class ExceptionObjHolderTest : public ::testing::Test {
public: public:
static std_support::vector<ObjHeader*> Collect(mm::ThreadData& threadData) { static std::vector<ObjHeader*> Collect(mm::ThreadData& threadData) {
threadData.specialRefRegistry().publish(); threadData.specialRefRegistry().publish();
std_support::vector<ObjHeader*> result; std::vector<ObjHeader*> result;
for (const auto& obj : mm::SpecialRefRegistry::instance().roots()) { for (const auto& obj : mm::SpecialRefRegistry::instance().roots()) {
result.push_back(obj); result.push_back(obj);
} }
@@ -5,14 +5,15 @@
#include "Freezing.hpp" #include "Freezing.hpp"
#include <unordered_set>
#include <vector>
#include "ExtraObjectData.hpp" #include "ExtraObjectData.hpp"
#include "FreezeHooks.hpp" #include "FreezeHooks.hpp"
#include "Memory.h" #include "Memory.h"
#include "Natives.h" #include "Natives.h"
#include "ObjectTraversal.hpp" #include "ObjectTraversal.hpp"
#include "Types.h" #include "Types.h"
#include "std_support/UnorderedSet.hpp"
#include "std_support/Vector.hpp"
using namespace kotlin; using namespace kotlin;
@@ -31,10 +32,10 @@ bool mm::IsFrozen(const ObjHeader* object) noexcept {
ObjHeader* mm::FreezeSubgraph(ObjHeader* root) noexcept { ObjHeader* mm::FreezeSubgraph(ObjHeader* root) noexcept {
if (IsFrozen(root)) return nullptr; if (IsFrozen(root)) return nullptr;
std_support::vector<ObjHeader*> objects; std::vector<ObjHeader*> objects;
std_support::vector<ObjHeader*> stack; std::vector<ObjHeader*> stack;
// TODO: This may be a suboptimal container for the job. // TODO: This may be a suboptimal container for the job.
std_support::unordered_set<ObjHeader*> visited; std::unordered_set<ObjHeader*> visited;
stack.push_back(root); stack.push_back(root);
while (!stack.empty()) { while (!stack.empty()) {
ObjHeader* object = stack.back(); ObjHeader* object = stack.back();

Some files were not shown because too many files have changed in this diff Show More