diff options
| author | Rémi Verschelde <rverschelde@gmail.com> | 2023-05-09 17:44:45 +0200 |
|---|---|---|
| committer | Rémi Verschelde <rverschelde@gmail.com> | 2023-05-09 17:44:45 +0200 |
| commit | 58ea42e4abda1f89b968a4483aaf66e4849c3ff1 (patch) | |
| tree | b3a81bd7daef4b5033a56c8d467cf0f526a42b51 | |
| parent | 5ade250c7d0b9e7f0cb30f372858e42cbc274abf (diff) | |
| parent | 341b95871cdbffe8861735b507f12fab363bc469 (diff) | |
| download | redot-engine-58ea42e4abda1f89b968a4483aaf66e4849c3ff1.tar.gz | |
Merge pull request #76755 from RandomShaper/fix_allocator_thsafe
Add missing thread safety to PagedAllocator
| -rw-r--r-- | core/os/spin_lock.h | 6 | ||||
| -rw-r--r-- | core/templates/paged_allocator.h | 45 |
2 files changed, 42 insertions, 9 deletions
diff --git a/core/os/spin_lock.h b/core/os/spin_lock.h
index 409154dbb5..93ea782b60 100644
--- a/core/os/spin_lock.h
+++ b/core/os/spin_lock.h
@@ -36,15 +36,15 @@
 #include <atomic>
 
 class SpinLock {
-	std::atomic_flag locked = ATOMIC_FLAG_INIT;
+	mutable std::atomic_flag locked = ATOMIC_FLAG_INIT;
 
 public:
-	_ALWAYS_INLINE_ void lock() {
+	_ALWAYS_INLINE_ void lock() const {
 		while (locked.test_and_set(std::memory_order_acquire)) {
 			// Continue.
 		}
 	}
-	_ALWAYS_INLINE_ void unlock() {
+	_ALWAYS_INLINE_ void unlock() const {
 		locked.clear(std::memory_order_release);
 	}
 };
diff --git a/core/templates/paged_allocator.h b/core/templates/paged_allocator.h
index 1cd71ec16c..deb2937771 100644
--- a/core/templates/paged_allocator.h
+++ b/core/templates/paged_allocator.h
@@ -99,7 +99,8 @@ public:
 		}
 	}
 
-	void reset(bool p_allow_unfreed = false) {
+private:
+	void _reset(bool p_allow_unfreed) {
 		if (!p_allow_unfreed || !std::is_trivially_destructible<T>::value) {
 			ERR_FAIL_COND(allocs_available < pages_allocated * page_size);
 		}
@@ -116,16 +117,41 @@ public:
 			allocs_available = 0;
 		}
 	}
+
+public:
+	void reset(bool p_allow_unfreed = false) {
+		if (thread_safe) {
+			spin_lock.lock();
+		}
+		_reset(p_allow_unfreed);
+		if (thread_safe) {
+			spin_lock.unlock();
+		}
+	}
+
 	bool is_configured() const {
-		return page_size > 0;
+		if (thread_safe) {
+			spin_lock.lock();
+		}
+		bool result = page_size > 0;
+		if (thread_safe) {
+			spin_lock.unlock();
+		}
+		return result;
 	}
 
 	void configure(uint32_t p_page_size) {
+		if (thread_safe) {
+			spin_lock.lock();
+		}
 		ERR_FAIL_COND(page_pool != nullptr); //sanity check
 		ERR_FAIL_COND(p_page_size == 0);
 		page_size = nearest_power_of_2_templated(p_page_size);
 		page_mask = page_size - 1;
 		page_shift = get_shift_from_power_of_2(page_size);
+		if (thread_safe) {
+			spin_lock.unlock();
+		}
 	}
 
 	// Power of 2 recommended because of alignment with OS page sizes.
@@ -135,13 +161,20 @@ public:
 	}
 
 	~PagedAllocator() {
-		if (allocs_available < pages_allocated * page_size) {
+		if (thread_safe) {
+			spin_lock.lock();
+		}
+		bool leaked = allocs_available < pages_allocated * page_size;
+		if (leaked) {
 			if (CoreGlobals::leak_reporting_enabled) {
-				ERR_FAIL_COND_MSG(allocs_available < pages_allocated * page_size, String("Pages in use exist at exit in PagedAllocator: ") + String(typeid(T).name()));
+				ERR_PRINT(String("Pages in use exist at exit in PagedAllocator: ") + String(typeid(T).name()));
 			}
-			return;
+		} else {
+			_reset(false);
+		}
+		if (thread_safe) {
+			spin_lock.unlock();
 		}
-		reset();
 	}
 };
