From 18fbdbb456c07a56b358bea2e392765fbcbb3283 Mon Sep 17 00:00:00 2001
From: Pedro J. Estébanez
Date: Wed, 26 Feb 2020 11:28:13 +0100
Subject: Reimplement Mutex with C++'s <mutex>

Main:
- It's now implemented thanks to `<mutex>`. No more platform-specific implementations.
- `BinaryMutex` (non-recursive) is added as an alternative for special cases.
- Doesn't need allocation/deallocation anymore. It can live on the stack and be part of other classes.
- Because of that, its methods are now `const` and the inner mutex is `mutable` so it can be easily used in `const` contexts.
- A no-op implementation is provided if `NO_THREADS` is defined. No more need to add `#ifdef NO_THREADS` just for this.
- `MutexLock` now takes a reference. At this point the cases of null `Mutex`es are rare. If you ever need that, just don't use `MutexLock`.
- Thread-safe utilities are therefore simpler now.

Misc.:
- `ScopedMutexLock` is dropped and replaced by `MutexLock`, because they were pretty much the same.
- Every case of lock, do-something, unlock is replaced by `MutexLock` (complex cases where it's not straightforward are kept as an explicit lock and unlock).
- `ShaderRD` contained an `std::mutex`, which has been replaced by `Mutex`.
---
 core/io/file_access_network.cpp | 61 ++++++++++++++++++++---------------------
 1 file changed, 29 insertions(+), 32 deletions(-)

(limited to 'core/io/file_access_network.cpp')

diff --git a/core/io/file_access_network.cpp b/core/io/file_access_network.cpp
index 202eb89dbd..7f1eb6fd90 100644
--- a/core/io/file_access_network.cpp
+++ b/core/io/file_access_network.cpp
@@ -42,14 +42,14 @@
 
 void FileAccessNetworkClient::lock_mutex() {
 
-	mutex->lock();
+	mutex.lock();
 	lockcount++;
 }
 
 void FileAccessNetworkClient::unlock_mutex() {
 
 	lockcount--;
-	mutex->unlock();
+	mutex.unlock();
 }
 
 void FileAccessNetworkClient::put_32(int p_32) {
@@ -97,15 +97,16 @@ void FileAccessNetworkClient::_thread_func() {
 	lock_mutex();
 	DEBUG_PRINT("MUTEX PASS");
 
-	blockrequest_mutex->lock();
-	while (block_requests.size()) {
-		put_32(block_requests.front()->get().id);
-		put_32(FileAccessNetwork::COMMAND_READ_BLOCK);
-		put_64(block_requests.front()->get().offset);
-		put_32(block_requests.front()->get().size);
-		block_requests.pop_front();
+	{
+		MutexLock lock(blockrequest_mutex);
+		while (block_requests.size()) {
+			put_32(block_requests.front()->get().id);
+			put_32(FileAccessNetwork::COMMAND_READ_BLOCK);
+			put_64(block_requests.front()->get().offset);
+			put_32(block_requests.front()->get().size);
+			block_requests.pop_front();
+		}
 	}
-	blockrequest_mutex->unlock();
 
 	DEBUG_PRINT("THREAD ITER");
 
@@ -225,8 +226,6 @@ FileAccessNetworkClient *FileAccessNetworkClient::singleton = NULL;
 
 FileAccessNetworkClient::FileAccessNetworkClient() {
 	thread = NULL;
-	mutex = Mutex::create();
-	blockrequest_mutex = Mutex::create();
 	quit = false;
 	singleton = this;
 	last_id = 0;
@@ -244,8 +243,6 @@ FileAccessNetworkClient::~FileAccessNetworkClient() {
 		memdelete(thread);
 	}
 
-	memdelete(blockrequest_mutex);
-	memdelete(mutex);
 	memdelete(sem);
 }
 
@@ -259,10 +256,11 @@ void FileAccessNetwork::_set_block(int p_offset, const Vector<uint8_t> &p_block)
 		ERR_FAIL_COND((p_block.size() != (int)(total_size % page_size)));
 	}
 
-	buffer_mutex->lock();
-	pages.write[page].buffer = p_block;
-	pages.write[page].queued = false;
-	buffer_mutex->unlock();
+	{
+		MutexLock lock(buffer_mutex);
+		pages.write[page].buffer = p_block;
+		pages.write[page].queued = false;
+	}
 
 	if (waiting_on_page == page) {
 		waiting_on_page = -1;
@@ -384,15 +382,16 @@ void FileAccessNetwork::_queue_page(int p_page) const {
 
 	if (pages[p_page].buffer.empty() && !pages[p_page].queued) {
 		FileAccessNetworkClient *nc = FileAccessNetworkClient::singleton;
-
-		nc->blockrequest_mutex->lock();
-		FileAccessNetworkClient::BlockRequest br;
-		br.id = id;
-		br.offset = size_t(p_page) * page_size;
-		br.size = page_size;
-		nc->block_requests.push_back(br);
-		pages.write[p_page].queued = true;
-		nc->blockrequest_mutex->unlock();
+		{
+			MutexLock lock(nc->blockrequest_mutex);
+
+			FileAccessNetworkClient::BlockRequest br;
+			br.id = id;
+			br.offset = size_t(p_page) * page_size;
+			br.size = page_size;
+			nc->block_requests.push_back(br);
+			pages.write[p_page].queued = true;
+		}
 		DEBUG_PRINT("QUEUE PAGE POST");
 		nc->sem->post();
 		DEBUG_PRINT("queued " + itos(p_page));
@@ -418,14 +417,14 @@ int FileAccessNetwork::get_buffer(uint8_t *p_dst, int p_length) const {
 		int page = pos / page_size;
 
 		if (page != last_page) {
-			buffer_mutex->lock();
+			buffer_mutex.lock();
 			if (pages[page].buffer.empty()) {
 				waiting_on_page = page;
 				for (int j = 0; j < read_ahead; j++) {
 
 					_queue_page(page + j);
 				}
-				buffer_mutex->unlock();
+				buffer_mutex.unlock();
 				DEBUG_PRINT("wait");
 				page_sem->wait();
 				DEBUG_PRINT("done");
@@ -436,7 +435,7 @@ int FileAccessNetwork::get_buffer(uint8_t *p_dst, int p_length) const {
 					_queue_page(page + j);
 				}
 				//queue pages
-				buffer_mutex->unlock();
+				buffer_mutex.unlock();
 			}
 
 			buff = pages.write[page].buffer.ptrw();
@@ -524,7 +523,6 @@ FileAccessNetwork::FileAccessNetwork() {
 	pos = 0;
 	sem = SemaphoreOld::create();
 	page_sem = SemaphoreOld::create();
-	buffer_mutex = Mutex::create();
 	FileAccessNetworkClient *nc = FileAccessNetworkClient::singleton;
 	nc->lock_mutex();
 	id = nc->last_id++;
@@ -542,7 +540,6 @@ FileAccessNetwork::~FileAccessNetwork() {
 	close();
 	memdelete(sem);
 	memdelete(page_sem);
-	memdelete(buffer_mutex);
 
 	FileAccessNetworkClient *nc = FileAccessNetworkClient::singleton;
 	nc->lock_mutex();
-- 
cgit v1.2.3
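
For context on the pattern this diff converts to, here is a minimal sketch of the post-change usage, assuming the `Mutex`/`MutexLock` API from `core/os/mutex.h` described in the commit message; the `PageCache` class and its members are hypothetical and not taken from the patch.

// Illustrative sketch only (not part of the patch); assumes Godot's
// core/os/mutex.h after this change. Class and member names are hypothetical.
#include "core/os/mutex.h"

class PageCache {
	// The mutex is a plain member now (no Mutex::create()/memdelete()),
	// and `mutable` so it can be locked from const methods.
	mutable Mutex mutex;
	int cached_page = -1;

public:
	int get_cached_page() const {
		MutexLock lock(mutex); // Takes a reference; unlocks when it goes out of scope.
		return cached_page;
	}

	void set_cached_page(int p_page) {
		MutexLock lock(mutex);
		cached_page = p_page;
	}
};

Compared with the previous lock/do-something/unlock pattern, there is no heap allocation and no explicit unlock; the lock is released by `MutexLock`'s destructor even on early returns.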