Diffstat (limited to 'core')
-rw-r--r--  core/io/resource_format_binary.cpp   47
-rw-r--r--  core/io/resource_format_binary.h      2
-rw-r--r--  core/io/resource_loader.cpp         601
-rw-r--r--  core/io/resource_loader.h            56
-rw-r--r--  core/math/basis.cpp                   4
-rw-r--r--  core/math/color.cpp                  85
-rw-r--r--  core/math/color.h                     8
-rw-r--r--  core/math/convex_hull.cpp            20
-rw-r--r--  core/math/math_funcs.h               10
-rw-r--r--  core/math/quaternion.cpp              3
-rw-r--r--  core/math/static_raycaster.h         16
-rw-r--r--  core/object/message_queue.cpp       164
-rw-r--r--  core/object/message_queue.h          14
-rw-r--r--  core/object/object.h                  2
-rw-r--r--  core/object/worker_thread_pool.cpp   22
-rw-r--r--  core/object/worker_thread_pool.h      4
-rw-r--r--  core/os/mutex.h                      19
-rw-r--r--  core/os/thread.cpp                    5
-rw-r--r--  core/os/thread.h                      2
-rw-r--r--  core/os/time.cpp                      8
-rw-r--r--  core/register_core_types.cpp         12
-rw-r--r--  core/variant/variant_call.cpp         1
-rw-r--r--  core/variant/variant_setget.cpp       4
-rw-r--r--  core/variant/variant_setget.h         4
24 files changed, 570 insertions, 543 deletions
diff --git a/core/io/resource_format_binary.cpp b/core/io/resource_format_binary.cpp
index 38f41d645c..b4da314e96 100644
--- a/core/io/resource_format_binary.cpp
+++ b/core/io/resource_format_binary.cpp
@@ -445,13 +445,12 @@ Error ResourceLoaderBinary::parse_variant(Variant &r_v) {
WARN_PRINT("Broken external resource! (index out of size)");
r_v = Variant();
} else {
- if (external_resources[erindex].cache.is_null()) {
- //cache not here yet, wait for it?
- if (use_sub_threads) {
- Error err;
- external_resources.write[erindex].cache = ResourceLoader::load_threaded_get(external_resources[erindex].path, &err);
-
- if (err != OK || external_resources[erindex].cache.is_null()) {
+ Ref<ResourceLoader::LoadToken> &load_token = external_resources.write[erindex].load_token;
+ if (load_token.is_valid()) { // If not valid, that's OK; it means this load accepts broken dependencies.
+ Error err;
+ Ref<Resource> res = ResourceLoader::_load_complete(*load_token.ptr(), &err);
+ if (res.is_null()) {
+ if (!ResourceLoader::is_cleaning_tasks()) {
if (!ResourceLoader::get_abort_on_missing_resources()) {
ResourceLoader::notify_dependency_error(local_path, external_resources[erindex].path, external_resources[erindex].type);
} else {
@@ -459,12 +458,11 @@ Error ResourceLoaderBinary::parse_variant(Variant &r_v) {
ERR_FAIL_V_MSG(error, "Can't load dependency: " + external_resources[erindex].path + ".");
}
}
+ } else {
+ r_v = res;
}
}
-
- r_v = external_resources[erindex].cache;
}
-
} break;
default: {
ERR_FAIL_V(ERR_FILE_CORRUPT);
@@ -684,28 +682,13 @@ Error ResourceLoaderBinary::load() {
}
external_resources.write[i].path = path; //remap happens here, not on load because on load it can actually be used for filesystem dock resource remap
-
- if (!use_sub_threads) {
- external_resources.write[i].cache = ResourceLoader::load(path, external_resources[i].type);
-
- if (external_resources[i].cache.is_null()) {
- if (!ResourceLoader::get_abort_on_missing_resources()) {
- ResourceLoader::notify_dependency_error(local_path, path, external_resources[i].type);
- } else {
- error = ERR_FILE_MISSING_DEPENDENCIES;
- ERR_FAIL_V_MSG(error, "Can't load dependency: " + path + ".");
- }
- }
-
- } else {
- Error err = ResourceLoader::load_threaded_request(path, external_resources[i].type, use_sub_threads, ResourceFormatLoader::CACHE_MODE_REUSE, local_path);
- if (err != OK) {
- if (!ResourceLoader::get_abort_on_missing_resources()) {
- ResourceLoader::notify_dependency_error(local_path, path, external_resources[i].type);
- } else {
- error = ERR_FILE_MISSING_DEPENDENCIES;
- ERR_FAIL_V_MSG(error, "Can't load dependency: " + path + ".");
- }
+ external_resources.write[i].load_token = ResourceLoader::_load_start(path, external_resources[i].type, use_sub_threads ? ResourceLoader::LOAD_THREAD_DISTRIBUTE : ResourceLoader::LOAD_THREAD_FROM_CURRENT, ResourceFormatLoader::CACHE_MODE_REUSE);
+ if (!external_resources[i].load_token.is_valid()) {
+ if (!ResourceLoader::get_abort_on_missing_resources()) {
+ ResourceLoader::notify_dependency_error(local_path, path, external_resources[i].type);
+ } else {
+ error = ERR_FILE_MISSING_DEPENDENCIES;
+ ERR_FAIL_V_MSG(error, "Can't load dependency: " + path + ".");
}
}
}
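
The hunks above split external-dependency loading into two phases around ResourceLoader::LoadToken. A minimal sketch of the resulting pattern, using only names visible in this diff (surrounding setup elided):

    // Phase 1, in ResourceLoaderBinary::load(): start (or join) the dependency load.
    Ref<ResourceLoader::LoadToken> token = ResourceLoader::_load_start(
            path, type,
            use_sub_threads ? ResourceLoader::LOAD_THREAD_DISTRIBUTE
                            : ResourceLoader::LOAD_THREAD_FROM_CURRENT,
            ResourceFormatLoader::CACHE_MODE_REUSE);

    // Phase 2, in parse_variant(): block if still loading, then fetch the result.
    Error err;
    Ref<Resource> res = ResourceLoader::_load_complete(*token.ptr(), &err);

A null token means the request itself failed; a valid token with a null result is tolerated only when the load accepts broken dependencies.
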
diff --git a/core/io/resource_format_binary.h b/core/io/resource_format_binary.h
index add7cdf297..30f1664983 100644
--- a/core/io/resource_format_binary.h
+++ b/core/io/resource_format_binary.h
@@ -60,7 +60,7 @@ class ResourceLoaderBinary {
String path;
String type;
ResourceUID::ID uid = ResourceUID::INVALID_ID;
- Ref<Resource> cache;
+ Ref<ResourceLoader::LoadToken> load_token;
};
bool using_named_scene_ids = false;
diff --git a/core/io/resource_loader.cpp b/core/io/resource_loader.cpp
index 9af3a7daed..a27341dd2c 100644
--- a/core/io/resource_loader.cpp
+++ b/core/io/resource_loader.cpp
@@ -202,20 +202,71 @@ void ResourceFormatLoader::_bind_methods() {
///////////////////////////////////
+// This should be robust enough to be called redundantly without issues.
+void ResourceLoader::LoadToken::clear() {
+ thread_load_mutex.lock();
+
+ WorkerThreadPool::TaskID task_to_await = 0;
+
+ if (!local_path.is_empty()) { // Empty is used for the special case where the load task is not registered.
+ DEV_ASSERT(thread_load_tasks.has(local_path));
+ ThreadLoadTask &load_task = thread_load_tasks[local_path];
+ if (!load_task.awaited) {
+ task_to_await = load_task.task_id;
+ load_task.awaited = true;
+ }
+ thread_load_tasks.erase(local_path);
+ local_path.clear();
+ }
+
+ if (!user_path.is_empty()) {
+ DEV_ASSERT(user_load_tokens.has(user_path));
+ user_load_tokens.erase(user_path);
+ user_path.clear();
+ }
+
+ thread_load_mutex.unlock();
+
+ // If the task hasn't been awaited yet, await it here, locally, now that the token data is consistent.
+ if (task_to_await) {
+ WorkerThreadPool::get_singleton()->wait_for_task_completion(task_to_await);
+ }
+}
+
+ResourceLoader::LoadToken::~LoadToken() {
+ clear();
+}
+
Ref<Resource> ResourceLoader::_load(const String &p_path, const String &p_original_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error, bool p_use_sub_threads, float *r_progress) {
- bool found = false;
+ load_nesting++;
+ if (load_paths_stack.size()) {
+ thread_load_mutex.lock();
+ HashMap<String, ThreadLoadTask>::Iterator E = thread_load_tasks.find(load_paths_stack[load_paths_stack.size() - 1]);
+ if (E) {
+ E->value.sub_tasks.insert(p_path);
+ }
+ thread_load_mutex.unlock();
+ }
+ load_paths_stack.push_back(p_path);
// Try all loaders and pick the first match for the type hint
+ bool found = false;
+ Ref<Resource> res;
for (int i = 0; i < loader_count; i++) {
if (!loader[i]->recognize_path(p_path, p_type_hint)) {
continue;
}
found = true;
- Ref<Resource> res = loader[i]->load(p_path, !p_original_path.is_empty() ? p_original_path : p_path, r_error, p_use_sub_threads, r_progress, p_cache_mode);
- if (res.is_null()) {
- continue;
+ res = loader[i]->load(p_path, !p_original_path.is_empty() ? p_original_path : p_path, r_error, p_use_sub_threads, r_progress, p_cache_mode);
+ if (!res.is_null()) {
+ break;
}
+ }
+
+ load_paths_stack.resize(load_paths_stack.size() - 1);
+ load_nesting--;
+ if (!res.is_null()) {
return res;
}
@@ -232,47 +283,60 @@ Ref<Resource> ResourceLoader::_load(const String &p_path, const String &p_origin
void ResourceLoader::_thread_load_function(void *p_userdata) {
ThreadLoadTask &load_task = *(ThreadLoadTask *)p_userdata;
- load_task.loader_id = Thread::get_caller_id();
- if (load_task.cond_var) {
- //this is an actual thread, so wait for Ok from semaphore
- thread_load_semaphore->wait(); //wait until its ok to start loading
+ thread_load_mutex.lock();
+ caller_task_id = load_task.task_id;
+ if (cleaning_tasks) {
+ load_task.status = THREAD_LOAD_FAILED;
+ thread_load_mutex.unlock();
+ return;
}
- load_task.resource = _load(load_task.remapped_path, load_task.remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_task.error, load_task.use_sub_threads, &load_task.progress);
+ thread_load_mutex.unlock();
- load_task.progress = 1.0; //it was fully loaded at this point, so force progress to 1.0
+ // Thread-safe either way: this runs either on the current thread or on a brand-new one.
+ CallQueue *mq_override = nullptr;
+ if (load_nesting == 0) {
+ if (!load_task.dependent_path.is_empty()) {
+ load_paths_stack.push_back(load_task.dependent_path);
+ }
+ if (!Thread::is_main_thread()) {
+ mq_override = memnew(CallQueue);
+ MessageQueue::set_thread_singleton_override(mq_override);
+ }
+ } else {
+ DEV_ASSERT(load_task.dependent_path.is_empty());
+ }
+ // --
- thread_load_mutex->lock();
+ Ref<Resource> res = _load(load_task.remapped_path, load_task.remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_task.error, load_task.use_sub_threads, &load_task.progress);
+
+ thread_load_mutex.lock();
+
+ load_task.resource = res;
+
+ load_task.progress = 1.0; //it was fully loaded at this point, so force progress to 1.0
if (load_task.error != OK) {
load_task.status = THREAD_LOAD_FAILED;
} else {
load_task.status = THREAD_LOAD_LOADED;
}
- if (load_task.cond_var) {
- if (load_task.start_next && thread_waiting_count > 0) {
- thread_waiting_count--;
- //thread loading count remains constant, this ends but another one begins
- thread_load_semaphore->post();
- } else {
- thread_loading_count--; //no threads waiting, just reduce loading count
- }
-
- print_lt("END: load count: " + itos(thread_loading_count) + " / wait count: " + itos(thread_waiting_count) + " / suspended count: " + itos(thread_suspended_count) + " / active: " + itos(thread_loading_count - thread_suspended_count));
+ if (load_task.cond_var) {
load_task.cond_var->notify_all();
memdelete(load_task.cond_var);
load_task.cond_var = nullptr;
}
if (load_task.resource.is_valid()) {
- load_task.resource->set_path(load_task.local_path);
+ if (load_task.cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
+ load_task.resource->set_path(load_task.local_path);
+ }
if (load_task.xl_remapped) {
load_task.resource->set_as_translation_remapped(true);
}
#ifdef TOOLS_ENABLED
-
load_task.resource->set_edited(false);
if (timestamp_on_load) {
uint64_t mt = FileAccess::get_modified_time(load_task.remapped_path);
@@ -286,7 +350,12 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
}
}
- thread_load_mutex->unlock();
+ thread_load_mutex.unlock();
+
+ if (load_nesting == 0 && mq_override) {
+ memdelete(mq_override);
+ MessageQueue::set_thread_singleton_override(nullptr);
+ }
}
static String _validate_local_path(const String &p_path) {
@@ -299,91 +368,127 @@ static String _validate_local_path(const String &p_path) {
return ProjectSettings::get_singleton()->localize_path(p_path);
}
}
-Error ResourceLoader::load_threaded_request(const String &p_path, const String &p_type_hint, bool p_use_sub_threads, ResourceFormatLoader::CacheMode p_cache_mode, const String &p_source_resource) {
- String local_path = _validate_local_path(p_path);
- thread_load_mutex->lock();
+Error ResourceLoader::load_threaded_request(const String &p_path, const String &p_type_hint, bool p_use_sub_threads, ResourceFormatLoader::CacheMode p_cache_mode) {
+ thread_load_mutex.lock();
+ if (user_load_tokens.has(p_path)) {
+ print_verbose("load_threaded_request(): Another threaded load for resource path '" + p_path + "' has been initiated. Not an error.");
+ user_load_tokens[p_path]->reference(); // Additional request.
+ thread_load_mutex.unlock();
+ return OK;
+ }
+ user_load_tokens[p_path] = nullptr;
+ thread_load_mutex.unlock();
+
+ Ref<ResourceLoader::LoadToken> token = _load_start(p_path, p_type_hint, p_use_sub_threads ? LOAD_THREAD_DISTRIBUTE : LOAD_THREAD_SPAWN_SINGLE, p_cache_mode);
+ if (token.is_valid()) {
+ thread_load_mutex.lock();
+ token->user_path = p_path;
+ token->reference(); // First request.
+ user_load_tokens[p_path] = token.ptr();
+ print_lt("REQUEST: user load tokens: " + itos(user_load_tokens.size()));
+ thread_load_mutex.unlock();
+ return OK;
+ } else {
+ return FAILED;
+ }
+}
- if (!p_source_resource.is_empty()) {
- //must be loading from this resource
- if (!thread_load_tasks.has(p_source_resource)) {
- thread_load_mutex->unlock();
- ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "There is no thread loading source resource '" + p_source_resource + "'.");
- }
- //must not be already added as s sub tasks
- if (thread_load_tasks[p_source_resource].sub_tasks.has(local_path)) {
- thread_load_mutex->unlock();
- ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Thread loading source resource '" + p_source_resource + "' already is loading '" + local_path + "'.");
- }
+Ref<Resource> ResourceLoader::load(const String &p_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error) {
+ if (r_error) {
+ *r_error = OK;
}
- if (thread_load_tasks.has(local_path)) {
- thread_load_tasks[local_path].requests++;
- if (!p_source_resource.is_empty()) {
- thread_load_tasks[p_source_resource].sub_tasks.insert(local_path);
+ Ref<LoadToken> load_token = _load_start(p_path, p_type_hint, LOAD_THREAD_FROM_CURRENT, p_cache_mode);
+ if (!load_token.is_valid()) {
+ if (r_error) {
+ *r_error = FAILED;
}
- thread_load_mutex->unlock();
- return OK;
+ return Ref<Resource>();
}
- {
- //create load task
-
- ThreadLoadTask load_task;
+ Ref<Resource> res = _load_complete(*load_token.ptr(), r_error);
+ return res;
+}
- load_task.requests = 1;
- load_task.remapped_path = _path_remap(local_path, &load_task.xl_remapped);
- load_task.local_path = local_path;
- load_task.type_hint = p_type_hint;
- load_task.cache_mode = p_cache_mode;
- load_task.use_sub_threads = p_use_sub_threads;
+Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode) {
+ String local_path = _validate_local_path(p_path);
- { //must check if resource is already loaded before attempting to load it in a thread
+ Ref<LoadToken> load_token;
+ bool must_not_register = false;
+ ThreadLoadTask unregistered_load_task; // Once set, must be valid up to the call to do the load.
+ ThreadLoadTask *load_task_ptr = nullptr;
+ bool run_on_current_thread = false;
+ {
+ MutexLock thread_load_lock(thread_load_mutex);
- if (load_task.loader_id == Thread::get_caller_id()) {
- thread_load_mutex->unlock();
- ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Attempted to load a resource already being loaded from this thread, cyclic reference?");
+ if (thread_load_tasks.has(local_path)) {
+ load_token = Ref<LoadToken>(thread_load_tasks[local_path].load_token);
+ if (!load_token.is_valid()) {
+ // The token is dying (reached 0 on another thread).
+ // Ensure it's killed now so the path can be safely reused right away.
+ thread_load_tasks[local_path].load_token->clear();
+ } else {
+ if (p_cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
+ return load_token;
+ }
}
+ }
- Ref<Resource> existing = ResourceCache::get_ref(local_path);
+ load_token.instantiate();
+ load_token->local_path = local_path;
- if (existing.is_valid()) {
- //referencing is fine
- load_task.resource = existing;
- load_task.status = THREAD_LOAD_LOADED;
- load_task.progress = 1.0;
+ //create load task
+ {
+ ThreadLoadTask load_task;
+
+ load_task.remapped_path = _path_remap(local_path, &load_task.xl_remapped);
+ load_task.load_token = load_token.ptr();
+ load_task.local_path = local_path;
+ load_task.type_hint = p_type_hint;
+ load_task.cache_mode = p_cache_mode;
+ load_task.use_sub_threads = p_thread_mode == LOAD_THREAD_DISTRIBUTE;
+ if (p_cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
+ Ref<Resource> existing = ResourceCache::get_ref(local_path);
+ if (existing.is_valid()) {
+ //referencing is fine
+ load_task.resource = existing;
+ load_task.status = THREAD_LOAD_LOADED;
+ load_task.progress = 1.0;
+ thread_load_tasks[local_path] = load_task;
+ return load_token;
+ }
}
- }
-
- if (!p_source_resource.is_empty()) {
- thread_load_tasks[p_source_resource].sub_tasks.insert(local_path);
- }
- thread_load_tasks[local_path] = load_task;
- }
+ // If we want to ignore the cache, but another task is already loading this path, we can't add this one to the map, and we also have to complete it fully synchronously.
+ must_not_register = thread_load_tasks.has(local_path) && p_cache_mode == ResourceFormatLoader::CACHE_MODE_IGNORE;
+ if (must_not_register) {
+ load_token->local_path.clear();
+ unregistered_load_task = load_task;
+ } else {
+ thread_load_tasks[local_path] = load_task;
+ }
- ThreadLoadTask &load_task = thread_load_tasks[local_path];
+ load_task_ptr = must_not_register ? &unregistered_load_task : &thread_load_tasks[local_path];
+ }
- if (load_task.resource.is_null()) { //needs to be loaded in thread
+ run_on_current_thread = must_not_register || p_thread_mode == LOAD_THREAD_FROM_CURRENT;
- load_task.cond_var = memnew(ConditionVariable);
- if (thread_loading_count < thread_load_max) {
- thread_loading_count++;
- thread_load_semaphore->post(); //we have free threads, so allow one
+ if (run_on_current_thread) {
+ load_task_ptr->thread_id = Thread::get_caller_id();
+ if (must_not_register) {
+ load_token->res_if_unregistered = load_task_ptr->resource;
+ }
} else {
- thread_waiting_count++;
+ load_task_ptr->task_id = WorkerThreadPool::get_singleton()->add_native_task(&ResourceLoader::_thread_load_function, load_task_ptr);
}
-
- print_lt("REQUEST: load count: " + itos(thread_loading_count) + " / wait count: " + itos(thread_waiting_count) + " / suspended count: " + itos(thread_suspended_count) + " / active: " + itos(thread_loading_count - thread_suspended_count));
-
- load_task.thread = memnew(Thread);
- load_task.thread->start(_thread_load_function, &thread_load_tasks[local_path]);
- load_task.loader_id = load_task.thread->get_id();
}
- thread_load_mutex->unlock();
+ if (run_on_current_thread) {
+ _thread_load_function(load_task_ptr);
+ }
- return OK;
+ return load_token;
}
float ResourceLoader::_dependency_get_progress(const String &p_path) {
@@ -409,13 +514,22 @@ float ResourceLoader::_dependency_get_progress(const String &p_path) {
}
ResourceLoader::ThreadLoadStatus ResourceLoader::load_threaded_get_status(const String &p_path, float *r_progress) {
- String local_path = _validate_local_path(p_path);
+ MutexLock thread_load_lock(thread_load_mutex);
+
+ if (!user_load_tokens.has(p_path)) {
+ print_verbose("load_threaded_get_status(): No threaded load for resource path '" + p_path + "' has been initiated or its result has already been collected.");
+ return THREAD_LOAD_INVALID_RESOURCE;
+ }
- thread_load_mutex->lock();
+ String local_path = _validate_local_path(p_path);
if (!thread_load_tasks.has(local_path)) {
- thread_load_mutex->unlock();
+#ifdef DEV_ENABLED
+ CRASH_NOW();
+#endif
+ // On non-dev builds, be defensive and at least avoid crashing here.
return THREAD_LOAD_INVALID_RESOURCE;
}
+
ThreadLoadTask &load_task = thread_load_tasks[local_path];
ThreadLoadStatus status;
status = load_task.status;
@@ -423,198 +537,120 @@ ResourceLoader::ThreadLoadStatus ResourceLoader::load_threaded_get_status(const
*r_progress = _dependency_get_progress(local_path);
}
- thread_load_mutex->unlock();
-
return status;
}
Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_error) {
- String local_path = _validate_local_path(p_path);
-
- MutexLock thread_load_lock(*thread_load_mutex);
- if (!thread_load_tasks.has(local_path)) {
- if (r_error) {
- *r_error = ERR_INVALID_PARAMETER;
- }
- return Ref<Resource>();
+ if (r_error) {
+ *r_error = OK;
}
- ThreadLoadTask &load_task = thread_load_tasks[local_path];
+ Ref<Resource> res;
+ {
+ MutexLock thread_load_lock(thread_load_mutex);
- if (load_task.status == THREAD_LOAD_IN_PROGRESS) {
- if (load_task.loader_id == Thread::get_caller_id()) {
- // Load is in progress, but it's precisely this thread the one in charge.
- // That means this is a cyclic load.
+ if (!user_load_tokens.has(p_path)) {
+ print_verbose("load_threaded_get(): No threaded load for resource path '" + p_path + "' has been initiated or its result has already been collected.");
if (r_error) {
- *r_error = ERR_BUSY;
+ *r_error = ERR_INVALID_PARAMETER;
}
return Ref<Resource>();
- } else if (!load_task.cond_var) {
- // Load is in progress, but a condition variable was never created for it.
- // That happens when a load has been initiated with subthreads disabled,
- // but now another load thread needs to interact with this one (either
- // because of subthreads being used this time, or because it's simply a
- // threaded load running on a different thread).
- // Since we want to be notified when the load ends, we must create the
- // condition variable now.
- load_task.cond_var = memnew(ConditionVariable);
}
- }
-
- //cond var still exists, meaning it's still loading, request poll
- if (load_task.cond_var) {
- {
- // As we got a cond var, this means we are going to have to wait
- // until the sub-resource is done loading
- //
- // As this thread will become 'blocked' we should "exchange" its
- // active status with a waiting one, to ensure load continues.
- //
- // This ensures loading is never blocked and that is also within
- // the maximum number of active threads.
-
- if (thread_waiting_count > 0) {
- thread_waiting_count--;
- thread_loading_count++;
- thread_load_semaphore->post();
-
- load_task.start_next = false; //do not start next since we are doing it here
- }
-
- thread_suspended_count++;
-
- print_lt("GET: load count: " + itos(thread_loading_count) + " / wait count: " + itos(thread_waiting_count) + " / suspended count: " + itos(thread_suspended_count) + " / active: " + itos(thread_loading_count - thread_suspended_count));
- }
-
- bool still_valid = true;
- bool was_thread = load_task.thread;
- do {
- load_task.cond_var->wait(thread_load_lock);
- if (!thread_load_tasks.has(local_path)) { //may have been erased during unlock and this was always an invalid call
- still_valid = false;
- break;
- }
- } while (load_task.cond_var); // In case of spurious wakeup.
- if (was_thread) {
- thread_suspended_count--;
- }
-
- if (!still_valid) {
+ LoadToken *load_token = user_load_tokens[p_path];
+ if (!load_token) {
+ // This happens if the load was requested from one thread and is rapidly queried from another.
if (r_error) {
- *r_error = ERR_INVALID_PARAMETER;
+ *r_error = ERR_BUSY;
}
return Ref<Resource>();
}
+ res = _load_complete_inner(*load_token, r_error, thread_load_lock);
+ if (load_token->unreference()) {
+ memdelete(load_token);
+ }
}
- Ref<Resource> resource = load_task.resource;
- if (r_error) {
- *r_error = load_task.error;
- }
-
- load_task.requests--;
+ print_lt("GET: user load tokens: " + itos(user_load_tokens.size()));
- if (load_task.requests == 0) {
- if (load_task.thread) { //thread may not have been used
- load_task.thread->wait_to_finish();
- memdelete(load_task.thread);
- }
- thread_load_tasks.erase(local_path);
- }
+ return res;
+}
- return resource;
+Ref<Resource> ResourceLoader::_load_complete(LoadToken &p_load_token, Error *r_error) {
+ MutexLock thread_load_lock(thread_load_mutex);
+ return _load_complete_inner(p_load_token, r_error, thread_load_lock);
}
-Ref<Resource> ResourceLoader::load(const String &p_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error) {
+Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Error *r_error, MutexLock<SafeBinaryMutex<BINARY_MUTEX_TAG>> &p_thread_load_lock) {
if (r_error) {
- *r_error = ERR_CANT_OPEN;
+ *r_error = OK;
}
- String local_path = _validate_local_path(p_path);
+ if (!p_load_token.local_path.is_empty()) {
+ if (!thread_load_tasks.has(p_load_token.local_path)) {
+#ifdef DEV_ENABLED
+ CRASH_NOW();
+#endif
+ // On non-dev builds, be defensive and at least avoid crashing here.
+ if (r_error) {
+ *r_error = ERR_BUG;
+ }
+ return Ref<Resource>();
+ }
- if (p_cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
- thread_load_mutex->lock();
+ ThreadLoadTask &load_task = thread_load_tasks[p_load_token.local_path];
- //Is it already being loaded? poll until done
- if (thread_load_tasks.has(local_path)) {
- Error err = load_threaded_request(p_path, p_type_hint);
- if (err != OK) {
+ if (load_task.status == THREAD_LOAD_IN_PROGRESS) {
+ DEV_ASSERT((load_task.task_id == 0) != (load_task.thread_id == 0));
+
+ if ((load_task.task_id != 0 && load_task.task_id == caller_task_id) ||
+ (load_task.thread_id != 0 && load_task.thread_id == Thread::get_caller_id())) {
+ // Load is in progress, but it's precisely this thread the one in charge.
+ // That means this is a cyclic load.
if (r_error) {
- *r_error = err;
+ *r_error = ERR_BUSY;
}
- thread_load_mutex->unlock();
return Ref<Resource>();
}
- thread_load_mutex->unlock();
- return load_threaded_get(p_path, r_error);
- }
-
- //Is it cached?
-
- Ref<Resource> existing = ResourceCache::get_ref(local_path);
-
- if (existing.is_valid()) {
- thread_load_mutex->unlock();
-
- if (r_error) {
- *r_error = OK;
+ if (load_task.task_id != 0 && !load_task.awaited) {
+ // Loading thread is in the worker pool and still not awaited.
+ load_task.awaited = true;
+ thread_load_mutex.unlock();
+ WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
+ thread_load_mutex.lock();
+ } else {
+ // Loading thread is the main thread or a user thread, or a pool thread that is already awaited by some other thread.
+ if (!load_task.cond_var) {
+ load_task.cond_var = memnew(ConditionVariable);
+ }
+ do {
+ load_task.cond_var->wait(p_thread_load_lock);
+ DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
+ } while (load_task.cond_var);
}
-
- return existing; //use cached
}
- //load using task (but this thread)
- ThreadLoadTask load_task;
-
- load_task.requests = 1;
- load_task.local_path = local_path;
- load_task.remapped_path = _path_remap(local_path, &load_task.xl_remapped);
- load_task.type_hint = p_type_hint;
- load_task.cache_mode = p_cache_mode; //ignore
- load_task.loader_id = Thread::get_caller_id();
-
- thread_load_tasks[local_path] = load_task;
-
- thread_load_mutex->unlock();
-
- _thread_load_function(&thread_load_tasks[local_path]);
-
- return load_threaded_get(p_path, r_error);
-
- } else {
- bool xl_remapped = false;
- String path = _path_remap(local_path, &xl_remapped);
-
- if (path.is_empty()) {
- ERR_FAIL_V_MSG(Ref<Resource>(), "Remapping '" + local_path + "' failed.");
- }
-
- print_verbose("Loading resource: " + path);
- float p;
- Ref<Resource> res = _load(path, local_path, p_type_hint, p_cache_mode, r_error, false, &p);
-
- if (res.is_null()) {
- print_verbose("Failed loading resource: " + path);
- return Ref<Resource>();
+ if (cleaning_tasks) {
+ load_task.resource = Ref<Resource>();
+ load_task.error = FAILED;
}
- if (xl_remapped) {
- res->set_as_translation_remapped(true);
+ Ref<Resource> resource = load_task.resource;
+ if (r_error) {
+ *r_error = load_task.error;
}
-
-#ifdef TOOLS_ENABLED
-
- res->set_edited(false);
- if (timestamp_on_load) {
- uint64_t mt = FileAccess::get_modified_time(path);
- //printf("mt %s: %lli\n",remapped_path.utf8().get_data(),mt);
- res->set_last_modified_time(mt);
+ return resource;
+ } else {
+ // Special case of an unregistered task.
+ // The resource should have been loaded by now.
+ Ref<Resource> resource = p_load_token.res_if_unregistered;
+ if (!resource.is_valid()) {
+ if (r_error) {
+ *r_error = FAILED;
+ }
}
-#endif
-
- return res;
+ return resource;
}
}
@@ -958,32 +994,42 @@ void ResourceLoader::clear_translation_remaps() {
}
void ResourceLoader::clear_thread_load_tasks() {
- thread_load_mutex->lock();
-
- for (KeyValue<String, ResourceLoader::ThreadLoadTask> &E : thread_load_tasks) {
- switch (E.value.status) {
- case ResourceLoader::ThreadLoadStatus::THREAD_LOAD_LOADED: {
- E.value.resource = Ref<Resource>();
- } break;
-
- case ResourceLoader::ThreadLoadStatus::THREAD_LOAD_IN_PROGRESS: {
- if (E.value.thread != nullptr) {
- E.value.thread->wait_to_finish();
- memdelete(E.value.thread);
- E.value.thread = nullptr;
+ // Bring the thing down as quickly as possible without causing deadlocks or leaks.
+
+ thread_load_mutex.lock();
+ cleaning_tasks = true;
+
+ while (true) {
+ bool none_running = true;
+ if (thread_load_tasks.size()) {
+ for (KeyValue<String, ResourceLoader::ThreadLoadTask> &E : thread_load_tasks) {
+ if (E.value.status == THREAD_LOAD_IN_PROGRESS) {
+ if (E.value.cond_var) {
+ E.value.cond_var->notify_all();
+ memdelete(E.value.cond_var);
+ E.value.cond_var = nullptr;
+ }
+ none_running = false;
}
- E.value.resource = Ref<Resource>();
- } break;
-
- case ResourceLoader::ThreadLoadStatus::THREAD_LOAD_FAILED:
- default: {
- // do nothing
}
}
+ if (none_running) {
+ break;
+ }
+ thread_load_mutex.unlock();
+ OS::get_singleton()->delay_usec(1000);
+ thread_load_mutex.lock();
+ }
+
+ for (KeyValue<String, LoadToken *> &E : user_load_tokens) {
+ memdelete(E.value);
}
+ user_load_tokens.clear();
+
thread_load_tasks.clear();
- thread_load_mutex->unlock();
+ cleaning_tasks = false;
+ thread_load_mutex.unlock();
}
void ResourceLoader::load_path_remaps() {
@@ -1080,20 +1126,14 @@ void ResourceLoader::remove_custom_loaders() {
}
}
-void ResourceLoader::initialize() {
- thread_load_mutex = memnew(SafeBinaryMutex<BINARY_MUTEX_TAG>);
- thread_load_max = OS::get_singleton()->get_processor_count();
- thread_loading_count = 0;
- thread_waiting_count = 0;
- thread_suspended_count = 0;
- thread_load_semaphore = memnew(Semaphore);
+bool ResourceLoader::is_cleaning_tasks() {
+ MutexLock lock(thread_load_mutex);
+ return cleaning_tasks;
}
-void ResourceLoader::finalize() {
- clear_thread_load_tasks();
- memdelete(thread_load_mutex);
- memdelete(thread_load_semaphore);
-}
+void ResourceLoader::initialize() {}
+
+void ResourceLoader::finalize() {}
ResourceLoadErrorNotify ResourceLoader::err_notify = nullptr;
void *ResourceLoader::err_notify_ud = nullptr;
@@ -1105,16 +1145,17 @@ bool ResourceLoader::create_missing_resources_if_class_unavailable = false;
bool ResourceLoader::abort_on_missing_resource = true;
bool ResourceLoader::timestamp_on_load = false;
+thread_local int ResourceLoader::load_nesting = 0;
+thread_local WorkerThreadPool::TaskID ResourceLoader::caller_task_id = 0;
+thread_local Vector<String> ResourceLoader::load_paths_stack;
+
template <>
thread_local uint32_t SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG>::count = 0;
-SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG> *ResourceLoader::thread_load_mutex = nullptr;
+SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG> ResourceLoader::thread_load_mutex;
HashMap<String, ResourceLoader::ThreadLoadTask> ResourceLoader::thread_load_tasks;
-Semaphore *ResourceLoader::thread_load_semaphore = nullptr;
+bool ResourceLoader::cleaning_tasks = false;
-int ResourceLoader::thread_loading_count = 0;
-int ResourceLoader::thread_waiting_count = 0;
-int ResourceLoader::thread_suspended_count = 0;
-int ResourceLoader::thread_load_max = 0;
+HashMap<String, ResourceLoader::LoadToken *> ResourceLoader::user_load_tokens;
SelfList<Resource>::List ResourceLoader::remapped_list;
HashMap<String, Vector<String>> ResourceLoader::translation_remaps;
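
With tokens in place, the public API keeps its shape, but load_threaded_request() drops the p_source_resource parameter; sub-task tracking now happens implicitly through the thread-local load_paths_stack. A sketch of the user-facing sequence (the path is hypothetical):

    Error err = ResourceLoader::load_threaded_request("res://main_scene.tscn");
    // Optionally poll while doing other work:
    float progress = 0.0f;
    ResourceLoader::ThreadLoadStatus status =
            ResourceLoader::load_threaded_get_status("res://main_scene.tscn", &progress);
    // Collect the result; this consumes the user load token for the path.
    Ref<Resource> res = ResourceLoader::load_threaded_get("res://main_scene.tscn", &err);

Repeated requests for the same path add references to the same token, so each request should be matched by one load_threaded_get() call.
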
diff --git a/core/io/resource_loader.h b/core/io/resource_loader.h
index 72c1f90653..ffe9d5de9a 100644
--- a/core/io/resource_loader.h
+++ b/core/io/resource_loader.h
@@ -34,6 +34,7 @@
#include "core/io/resource.h"
#include "core/object/gdvirtual.gen.inc"
#include "core/object/script_language.h"
+#include "core/object/worker_thread_pool.h"
#include "core/os/semaphore.h"
#include "core/os/thread.h"
@@ -107,9 +108,30 @@ public:
THREAD_LOAD_LOADED
};
+ enum LoadThreadMode {
+ LOAD_THREAD_FROM_CURRENT,
+ LOAD_THREAD_SPAWN_SINGLE,
+ LOAD_THREAD_DISTRIBUTE,
+ };
+
+ struct LoadToken : public RefCounted {
+ String local_path;
+ String user_path;
+ Ref<Resource> res_if_unregistered;
+
+ void clear();
+
+ virtual ~LoadToken();
+ };
+
static const int BINARY_MUTEX_TAG = 1;
+ static Ref<LoadToken> _load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode);
+ static Ref<Resource> _load_complete(LoadToken &p_load_token, Error *r_error);
+
private:
+ static Ref<Resource> _load_complete_inner(LoadToken &p_load_token, Error *r_error, MutexLock<SafeBinaryMutex<BINARY_MUTEX_TAG>> &p_thread_load_lock);
+
static Ref<ResourceFormatLoader> loader[MAX_LOADERS];
static int loader_count;
static bool timestamp_on_load;
@@ -129,8 +151,7 @@ private:
static SelfList<Resource>::List remapped_list;
friend class ResourceFormatImporter;
- friend class ResourceInteractiveLoader;
- // Internal load function.
+
static Ref<Resource> _load(const String &p_path, const String &p_original_path, const String &p_type_hint, ResourceFormatLoader::CacheMode p_cache_mode, Error *r_error, bool p_use_sub_threads, float *r_progress);
static ResourceLoadedCallback _loaded_callback;
@@ -138,11 +159,14 @@ private:
static Ref<ResourceFormatLoader> _find_custom_resource_format_loader(String path);
struct ThreadLoadTask {
- Thread *thread = nullptr;
- Thread::ID loader_id = 0;
- ConditionVariable *cond_var = nullptr;
+ WorkerThreadPool::TaskID task_id = 0; // Used if run on a worker thread from the pool.
+ Thread::ID thread_id = 0; // Used if running on a user thread (e.g., simple non-threaded load).
+ bool awaited = false; // If it's in the pool, this helps avoid awaiting it from more than one dependent thread.
+ ConditionVariable *cond_var = nullptr; // If not in the worker pool, or already awaited, this is used as a secondary waiting mechanism.
+ LoadToken *load_token = nullptr;
String local_path;
String remapped_path;
+ String dependent_path;
String type_hint;
float progress = 0.0;
ThreadLoadStatus status = THREAD_LOAD_IN_PROGRESS;
@@ -151,27 +175,29 @@ private:
Ref<Resource> resource;
bool xl_remapped = false;
bool use_sub_threads = false;
- bool start_next = true;
- int requests = 0;
HashSet<String> sub_tasks;
};
static void _thread_load_function(void *p_userdata);
- static SafeBinaryMutex<BINARY_MUTEX_TAG> *thread_load_mutex;
+
+ static thread_local int load_nesting;
+ static thread_local WorkerThreadPool::TaskID caller_task_id;
+ static thread_local Vector<String> load_paths_stack;
+ static SafeBinaryMutex<BINARY_MUTEX_TAG> thread_load_mutex;
static HashMap<String, ThreadLoadTask> thread_load_tasks;
- static Semaphore *thread_load_semaphore;
- static int thread_waiting_count;
- static int thread_loading_count;
- static int thread_suspended_count;
- static int thread_load_max;
+ static bool cleaning_tasks;
+
+ static HashMap<String, LoadToken *> user_load_tokens;
static float _dependency_get_progress(const String &p_path);
public:
- static Error load_threaded_request(const String &p_path, const String &p_type_hint = "", bool p_use_sub_threads = false, ResourceFormatLoader::CacheMode p_cache_mode = ResourceFormatLoader::CACHE_MODE_REUSE, const String &p_source_resource = String());
+ static Error load_threaded_request(const String &p_path, const String &p_type_hint = "", bool p_use_sub_threads = false, ResourceFormatLoader::CacheMode p_cache_mode = ResourceFormatLoader::CACHE_MODE_REUSE);
static ThreadLoadStatus load_threaded_get_status(const String &p_path, float *r_progress = nullptr);
static Ref<Resource> load_threaded_get(const String &p_path, Error *r_error = nullptr);
+ static bool is_within_load() { return load_nesting > 0; };
+
static Ref<Resource> load(const String &p_path, const String &p_type_hint = "", ResourceFormatLoader::CacheMode p_cache_mode = ResourceFormatLoader::CACHE_MODE_REUSE, Error *r_error = nullptr);
static bool exists(const String &p_path, const String &p_type_hint = "");
@@ -237,6 +263,8 @@ public:
static void set_create_missing_resources_if_class_unavailable(bool p_enable);
_FORCE_INLINE_ static bool is_creating_missing_resources_if_class_unavailable_enabled() { return create_missing_resources_if_class_unavailable; }
+ static bool is_cleaning_tasks();
+
static void initialize();
static void finalize();
};
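
The new LoadThreadMode enum covers what the old booleans expressed implicitly; per the call sites in this diff, the mapping is roughly:

    // LOAD_THREAD_FROM_CURRENT: blocking load on the calling thread (ResourceLoader::load()).
    // LOAD_THREAD_SPAWN_SINGLE: one worker-pool task (load_threaded_request() with use_sub_threads false).
    // LOAD_THREAD_DISTRIBUTE:   worker-pool task, with dependencies distributed as sub-tasks
    //                           (use_sub_threads true).
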
diff --git a/core/math/basis.cpp b/core/math/basis.cpp
index 95a4187062..bfd902c7e2 100644
--- a/core/math/basis.cpp
+++ b/core/math/basis.cpp
@@ -807,8 +807,8 @@ void Basis::get_axis_angle(Vector3 &r_axis, real_t &r_angle) const {
z = (rows[1][0] - rows[0][1]) / s;
r_axis = Vector3(x, y, z);
- // CLAMP to avoid NaN if the value passed to acos is not in [0,1].
- r_angle = Math::acos(CLAMP((rows[0][0] + rows[1][1] + rows[2][2] - 1) / 2, (real_t)0.0, (real_t)1.0));
+ // acos does clamping.
+ r_angle = Math::acos((rows[0][0] + rows[1][1] + rows[2][2] - 1) / 2);
}
void Basis::set_quaternion(const Quaternion &p_quaternion) {
diff --git a/core/math/color.cpp b/core/math/color.cpp
index f4b8903157..0d9325f236 100644
--- a/core/math/color.cpp
+++ b/core/math/color.cpp
@@ -188,32 +188,6 @@ float Color::get_v() const {
return max;
}
-float Color::get_hsl_h() const {
- return get_h();
-}
-
-float Color::get_hsl_s() const {
- float min = MIN(MIN(r, g), b);
- float max = MAX(MAX(r, g), b);
-
- float mid = (min + max) / 2.0f;
-
- if (mid == 0.0f || mid == 1.0f) {
- return 0.0f;
- }
-
- float delta = max - min;
-
- return delta / (1.0f - Math::abs(2.0f * mid - 1.0f));
-}
-
-float Color::get_hsl_l() const {
- float min = MIN(MIN(r, g), b);
- float max = MAX(MAX(r, g), b);
-
- return (min + max) / 2.0f;
-}
-
void Color::set_hsv(float p_h, float p_s, float p_v, float p_alpha) {
int i;
float f, p, q, t;
@@ -268,59 +242,6 @@ void Color::set_hsv(float p_h, float p_s, float p_v, float p_alpha) {
}
}
-void Color::set_hsl(float p_h, float p_s, float p_l, float p_alpha) {
- a = p_alpha;
-
- if (p_s == 0.0f) {
- // Achromatic (gray)
- r = g = b = p_l;
- return;
- }
-
- p_h *= 6.0f;
- p_h = Math::fmod(p_h, 6.0f);
-
- float c = (1.0f - Math::abs(2.0f * p_l - 1.0f)) * p_s;
- float x = c * (1.0f - Math::abs(Math::fmod(p_h, 2.0f) - 1.0f));
- float m = p_l - c / 2.0f;
-
- c += m;
- x += m;
-
- switch ((int)p_h) {
- case 0: // Red is the dominant color
- r = c;
- g = x;
- b = m;
- break;
- case 1: // Green is the dominant color
- r = x;
- g = c;
- b = m;
- break;
- case 2:
- r = m;
- g = c;
- b = x;
- break;
- case 3: // Blue is the dominant color
- r = m;
- g = x;
- b = c;
- break;
- case 4:
- r = x;
- g = m;
- b = c;
- break;
- default: // (5) Red is the dominant color
- r = c;
- g = m;
- b = x;
- break;
- }
-}
-
void Color::set_ok_hsl(float p_h, float p_s, float p_l, float p_alpha) {
ok_color::HSL hsl;
hsl.h = p_h;
@@ -547,12 +468,6 @@ Color Color::from_hsv(float p_h, float p_s, float p_v, float p_alpha) {
return c;
}
-Color Color::from_hsl(float p_h, float p_s, float p_l, float p_alpha) {
- Color c;
- c.set_hsl(p_h, p_s, p_l, p_alpha);
- return c;
-}
-
Color Color::from_rgbe9995(uint32_t p_rgbe) {
float r = p_rgbe & 0x1ff;
float g = (p_rgbe >> 9) & 0x1ff;
diff --git a/core/math/color.h b/core/math/color.h
index 4a056335c1..65d7377c1c 100644
--- a/core/math/color.h
+++ b/core/math/color.h
@@ -57,10 +57,6 @@ struct _NO_DISCARD_ Color {
float get_s() const;
float get_v() const;
void set_hsv(float p_h, float p_s, float p_v, float p_alpha = 1.0f);
- float get_hsl_h() const;
- float get_hsl_s() const;
- float get_hsl_l() const;
- void set_hsl(float p_h, float p_s, float p_l, float p_alpha = 1.0f);
float get_ok_hsl_h() const;
float get_ok_hsl_s() const;
float get_ok_hsl_l() const;
@@ -202,7 +198,6 @@ struct _NO_DISCARD_ Color {
static Color get_named_color(int p_idx);
static Color from_string(const String &p_string, const Color &p_default);
static Color from_hsv(float p_h, float p_s, float p_v, float p_alpha = 1.0f);
- static Color from_hsl(float p_h, float p_s, float p_l, float p_alpha = 1.0f);
static Color from_ok_hsl(float p_h, float p_s, float p_l, float p_alpha = 1.0f);
static Color from_rgbe9995(uint32_t p_rgbe);
@@ -222,9 +217,6 @@ struct _NO_DISCARD_ Color {
_FORCE_INLINE_ void set_h(float p_h) { set_hsv(p_h, get_s(), get_v(), a); }
_FORCE_INLINE_ void set_s(float p_s) { set_hsv(get_h(), p_s, get_v(), a); }
_FORCE_INLINE_ void set_v(float p_v) { set_hsv(get_h(), get_s(), p_v, a); }
- _FORCE_INLINE_ void set_hsl_h(float p_h) { set_hsl(p_h, get_hsl_s(), get_hsl_l(), a); }
- _FORCE_INLINE_ void set_hsl_s(float p_s) { set_hsl(get_hsl_h(), p_s, get_hsl_l(), a); }
- _FORCE_INLINE_ void set_hsl_l(float p_l) { set_hsl(get_hsl_h(), get_hsl_s(), p_l, a); }
_FORCE_INLINE_ void set_ok_hsl_h(float p_h) { set_ok_hsl(p_h, get_ok_hsl_s(), get_ok_hsl_l(), a); }
_FORCE_INLINE_ void set_ok_hsl_s(float p_s) { set_ok_hsl(get_ok_hsl_h(), p_s, get_ok_hsl_l(), a); }
_FORCE_INLINE_ void set_ok_hsl_l(float p_l) { set_ok_hsl(get_ok_hsl_h(), get_ok_hsl_s(), p_l, a); }
diff --git a/core/math/convex_hull.cpp b/core/math/convex_hull.cpp
index a03438a339..76b3062944 100644
--- a/core/math/convex_hull.cpp
+++ b/core/math/convex_hull.cpp
@@ -596,9 +596,9 @@ private:
}
};
- enum Orientation { NONE,
- CLOCKWISE,
- COUNTER_CLOCKWISE };
+ enum Orientation { ORIENTATION_NONE,
+ ORIENTATION_CLOCKWISE,
+ ORIENTATION_COUNTER_CLOCKWISE };
Vector3 scaling;
Vector3 center;
@@ -1140,13 +1140,13 @@ ConvexHullInternal::Orientation ConvexHullInternal::get_orientation(const Edge *
CHULL_ASSERT(!m.is_zero());
int64_t dot = n.dot(m);
CHULL_ASSERT(dot != 0);
- return (dot > 0) ? COUNTER_CLOCKWISE : CLOCKWISE;
+ return (dot > 0) ? ORIENTATION_COUNTER_CLOCKWISE : ORIENTATION_CLOCKWISE;
}
- return COUNTER_CLOCKWISE;
+ return ORIENTATION_COUNTER_CLOCKWISE;
} else if (p_prev->prev == p_next) {
- return CLOCKWISE;
+ return ORIENTATION_CLOCKWISE;
} else {
- return NONE;
+ return ORIENTATION_NONE;
}
}
@@ -1176,7 +1176,7 @@ ConvexHullInternal::Edge *ConvexHullInternal::find_max_angle(bool p_ccw, const V
} else if ((cmp = cot.compare(p_min_cot)) < 0) {
p_min_cot = cot;
min_edge = e;
- } else if ((cmp == 0) && (p_ccw == (get_orientation(min_edge, e, p_s, t) == COUNTER_CLOCKWISE))) {
+ } else if ((cmp == 0) && (p_ccw == (get_orientation(min_edge, e, p_s, t) == ORIENTATION_COUNTER_CLOCKWISE))) {
min_edge = e;
}
}
@@ -1375,7 +1375,7 @@ void ConvexHullInternal::merge(IntermediateHull &p_h0, IntermediateHull &p_h1) {
int64_t dot = (*e->target - *c0).dot(normal);
CHULL_ASSERT(dot <= 0);
if ((dot == 0) && ((*e->target - *c0).dot(t) > 0)) {
- if (!start0 || (get_orientation(start0, e, s, Point32(0, 0, -1)) == CLOCKWISE)) {
+ if (!start0 || (get_orientation(start0, e, s, Point32(0, 0, -1)) == ORIENTATION_CLOCKWISE)) {
start0 = e;
}
}
@@ -1390,7 +1390,7 @@ void ConvexHullInternal::merge(IntermediateHull &p_h0, IntermediateHull &p_h1) {
int64_t dot = (*e->target - *c1).dot(normal);
CHULL_ASSERT(dot <= 0);
if ((dot == 0) && ((*e->target - *c1).dot(t) > 0)) {
- if (!start1 || (get_orientation(start1, e, s, Point32(0, 0, -1)) == COUNTER_CLOCKWISE)) {
+ if (!start1 || (get_orientation(start1, e, s, Point32(0, 0, -1)) == ORIENTATION_COUNTER_CLOCKWISE)) {
start1 = e;
}
}
diff --git a/core/math/math_funcs.h b/core/math/math_funcs.h
index 078320d620..f96d3a909f 100644
--- a/core/math/math_funcs.h
+++ b/core/math/math_funcs.h
@@ -74,11 +74,13 @@ public:
static _ALWAYS_INLINE_ double tanh(double p_x) { return ::tanh(p_x); }
static _ALWAYS_INLINE_ float tanh(float p_x) { return ::tanhf(p_x); }
- static _ALWAYS_INLINE_ double asin(double p_x) { return ::asin(p_x); }
- static _ALWAYS_INLINE_ float asin(float p_x) { return ::asinf(p_x); }
+ // Always clamps the input, so it's always safe to use.
+ static _ALWAYS_INLINE_ double asin(double p_x) { return p_x < -1 ? (-Math_PI / 2) : (p_x > 1 ? (Math_PI / 2) : ::asin(p_x)); }
+ static _ALWAYS_INLINE_ float asin(float p_x) { return p_x < -1 ? (-Math_PI / 2) : (p_x > 1 ? (Math_PI / 2) : ::asinf(p_x)); }
- static _ALWAYS_INLINE_ double acos(double p_x) { return ::acos(p_x); }
- static _ALWAYS_INLINE_ float acos(float p_x) { return ::acosf(p_x); }
+ // Always clamps the input, so it's always safe to use.
+ static _ALWAYS_INLINE_ double acos(double p_x) { return p_x < -1 ? Math_PI : (p_x > 1 ? 0 : ::acos(p_x)); }
+ static _ALWAYS_INLINE_ float acos(float p_x) { return p_x < -1 ? Math_PI : (p_x > 1 ? 0 : ::acosf(p_x)); }
static _ALWAYS_INLINE_ double atan(double p_x) { return ::atan(p_x); }
static _ALWAYS_INLINE_ float atan(float p_x) { return ::atanf(p_x); }
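
Out-of-range inputs to the C library acos()/asin() return NaN, and values such as 1.0 + 1e-9 routinely arise from floating-point drift in dot products. The clamped wrappers make call sites like Basis::get_axis_angle() and Quaternion::angle_to() in this commit safe without a local CLAMP. A tiny illustration with made-up values:

    double d = 1.0 + 1e-9;              // a dot product that drifted past 1.0
    double a = Math::acos(d);           // 0.0 (the correct limit), where ::acos(d) yields NaN
    double b = Math::asin(-1.0 - 1e-9); // -Math_PI / 2 instead of NaN
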
diff --git a/core/math/quaternion.cpp b/core/math/quaternion.cpp
index 34e212a5b6..e4ad17c8ef 100644
--- a/core/math/quaternion.cpp
+++ b/core/math/quaternion.cpp
@@ -35,7 +35,8 @@
real_t Quaternion::angle_to(const Quaternion &p_to) const {
real_t d = dot(p_to);
- return Math::acos(CLAMP(d * d * 2 - 1, -1, 1));
+ // acos does clamping.
+ return Math::acos(d * d * 2 - 1);
}
Vector3 Quaternion::get_euler(EulerOrder p_order) const {
diff --git a/core/math/static_raycaster.h b/core/math/static_raycaster.h
index 1bafc29c57..c53868e12d 100644
--- a/core/math/static_raycaster.h
+++ b/core/math/static_raycaster.h
@@ -59,15 +59,15 @@ public:
/*! Constructs a ray from origin, direction, and ray segment. Near
* has to be smaller than far. */
- _FORCE_INLINE_ Ray(const Vector3 &org,
- const Vector3 &dir,
- float tnear = 0.0f,
- float tfar = INFINITY) :
- org(org),
- tnear(tnear),
- dir(dir),
+ _FORCE_INLINE_ Ray(const Vector3 &p_org,
+ const Vector3 &p_dir,
+ float p_tnear = 0.0f,
+ float p_tfar = INFINITY) :
+ org(p_org),
+ tnear(p_tnear),
+ dir(p_dir),
time(0.0f),
- tfar(tfar),
+ tfar(p_tfar),
mask(-1),
u(0.0),
v(0.0),
diff --git a/core/object/message_queue.cpp b/core/object/message_queue.cpp
index 05f4e2a8a6..55ea5f5ecd 100644
--- a/core/object/message_queue.cpp
+++ b/core/object/message_queue.cpp
@@ -35,14 +35,23 @@
#include "core/object/class_db.h"
#include "core/object/script_language.h"
+#define LOCK_MUTEX \
+ if (this != MessageQueue::thread_singleton) { \
+ mutex.lock(); \
+ }
+
+#define UNLOCK_MUTEX \
+ if (this != MessageQueue::thread_singleton) { \
+ mutex.unlock(); \
+ }
+
void CallQueue::_add_page() {
- if (pages_used == page_messages.size()) {
+ if (pages_used == page_bytes.size()) {
pages.push_back(allocator->alloc());
- page_messages.push_back(0);
+ page_bytes.push_back(0);
}
- page_messages[pages_used] = 0;
+ page_bytes[pages_used] = 0;
pages_used++;
- page_offset = 0;
}
Error CallQueue::push_callp(ObjectID p_id, const StringName &p_method, const Variant **p_args, int p_argcount, bool p_show_error) {
@@ -66,15 +75,15 @@ Error CallQueue::push_callablep(const Callable &p_callable, const Variant **p_ar
ERR_FAIL_COND_V_MSG(room_needed > uint32_t(PAGE_SIZE_BYTES), ERR_INVALID_PARAMETER, "Message is too large to fit on a page (" + itos(PAGE_SIZE_BYTES) + " bytes), consider passing less arguments.");
- mutex.lock();
+ LOCK_MUTEX;
_ensure_first_page();
- if ((page_offset + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
+ if ((page_bytes[pages_used - 1] + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if (pages_used == max_pages) {
ERR_PRINT("Failed method: " + p_callable + ". Message queue out of memory. " + error_text);
statistics();
- mutex.unlock();
+ UNLOCK_MUTEX;
return ERR_OUT_OF_MEMORY;
}
_add_page();
@@ -82,7 +91,7 @@ Error CallQueue::push_callablep(const Callable &p_callable, const Variant **p_ar
Page *page = pages[pages_used - 1];
- uint8_t *buffer_end = &page->data[page_offset];
+ uint8_t *buffer_end = &page->data[page_bytes[pages_used - 1]];
Message *msg = memnew_placement(buffer_end, Message);
msg->args = p_argcount;
@@ -104,21 +113,20 @@ Error CallQueue::push_callablep(const Callable &p_callable, const Variant **p_ar
*v = *p_args[i];
}
- page_messages[pages_used - 1]++;
- page_offset += room_needed;
+ page_bytes[pages_used - 1] += room_needed;
- mutex.unlock();
+ UNLOCK_MUTEX;
return OK;
}
Error CallQueue::push_set(ObjectID p_id, const StringName &p_prop, const Variant &p_value) {
- mutex.lock();
+ LOCK_MUTEX;
uint32_t room_needed = sizeof(Message) + sizeof(Variant);
_ensure_first_page();
- if ((page_offset + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
+ if ((page_bytes[pages_used - 1] + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if (pages_used == max_pages) {
String type;
if (ObjectDB::get_instance(p_id)) {
@@ -127,14 +135,14 @@ Error CallQueue::push_set(ObjectID p_id, const StringName &p_prop, const Variant
ERR_PRINT("Failed set: " + type + ":" + p_prop + " target ID: " + itos(p_id) + ". Message queue out of memory. " + error_text);
statistics();
- mutex.unlock();
+ UNLOCK_MUTEX;
return ERR_OUT_OF_MEMORY;
}
_add_page();
}
Page *page = pages[pages_used - 1];
- uint8_t *buffer_end = &page->data[page_offset];
+ uint8_t *buffer_end = &page->data[page_bytes[pages_used - 1]];
Message *msg = memnew_placement(buffer_end, Message);
msg->args = 1;
@@ -146,32 +154,31 @@ Error CallQueue::push_set(ObjectID p_id, const StringName &p_prop, const Variant
Variant *v = memnew_placement(buffer_end, Variant);
*v = p_value;
- page_messages[pages_used - 1]++;
- page_offset += room_needed;
- mutex.unlock();
+ page_bytes[pages_used - 1] += room_needed;
+ UNLOCK_MUTEX;
return OK;
}
Error CallQueue::push_notification(ObjectID p_id, int p_notification) {
ERR_FAIL_COND_V(p_notification < 0, ERR_INVALID_PARAMETER);
- mutex.lock();
+ LOCK_MUTEX;
uint32_t room_needed = sizeof(Message);
_ensure_first_page();
- if ((page_offset + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
+ if ((page_bytes[pages_used - 1] + room_needed) > uint32_t(PAGE_SIZE_BYTES)) {
if (pages_used == max_pages) {
ERR_PRINT("Failed notification: " + itos(p_notification) + " target ID: " + itos(p_id) + ". Message queue out of memory. " + error_text);
statistics();
- mutex.unlock();
+ UNLOCK_MUTEX;
return ERR_OUT_OF_MEMORY;
}
_add_page();
}
Page *page = pages[pages_used - 1];
- uint8_t *buffer_end = &page->data[page_offset];
+ uint8_t *buffer_end = &page->data[page_bytes[pages_used - 1]];
Message *msg = memnew_placement(buffer_end, Message);
@@ -180,9 +187,8 @@ Error CallQueue::push_notification(ObjectID p_id, int p_notification) {
//msg->target;
msg->notification = p_notification;
- page_messages[pages_used - 1]++;
- page_offset += room_needed;
- mutex.unlock();
+ page_bytes[pages_used - 1] += room_needed;
+ UNLOCK_MUTEX;
return OK;
}
@@ -205,26 +211,77 @@ void CallQueue::_call_function(const Callable &p_callable, const Variant *p_args
}
Error CallQueue::flush() {
- mutex.lock();
+ LOCK_MUTEX;
+
+ // Non-main thread queues are not meant to be flushed; they are appended to the main one.
+ if (this != MessageQueue::main_singleton) {
+ if (pages.size() == 0) {
+ return OK;
+ }
+
+ CallQueue *mq = MessageQueue::main_singleton;
+ DEV_ASSERT(!mq->allocator_is_custom && !allocator_is_custom); // Transferring pages is only safe if using the same allocator parameters.
+
+ mq->mutex.lock();
+
+ // Here we're transferring the data from this queue to the main one.
+ // However, it's very unlikely that large amounts of messages will be queued here,
+ // so a PagedArray/Pool would be overkill. Also, in most cases the data will fit
+ // in an already existing page of the main queue.
+
+ // Let's see if our first (likely only) page fits the current target queue page.
+ uint32_t src_page = 0;
+ {
+ if (mq->pages_used) {
+ uint32_t dst_page = mq->pages_used - 1;
+ uint32_t dst_offset = mq->page_bytes[dst_page];
+ if (dst_offset + page_bytes[0] < uint32_t(PAGE_SIZE_BYTES)) {
+ memcpy(mq->pages[dst_page]->data + dst_offset, pages[0]->data, page_bytes[0]);
+ mq->page_bytes[dst_page] += page_bytes[0];
+ src_page++;
+ }
+ }
+ }
+
+ // Any other possibly existing source page needs to be added.
+
+ if (mq->pages_used + (pages_used - src_page) > mq->max_pages) {
+ ERR_PRINT("Failed appending thread queue. Message queue out of memory. " + mq->error_text);
+ mq->statistics();
+ mq->mutex.unlock();
+ return ERR_OUT_OF_MEMORY;
+ }
+
+ for (; src_page < pages_used; src_page++) {
+ mq->_add_page();
+ memcpy(mq->pages[mq->pages_used - 1]->data, pages[src_page]->data, page_bytes[src_page]);
+ mq->page_bytes[mq->pages_used - 1] = page_bytes[src_page];
+ }
+
+ mq->mutex.unlock();
+
+ page_bytes[0] = 0;
+ pages_used = 1;
+
+ return OK;
+ }
if (pages.size() == 0) {
// Never allocated
- mutex.unlock();
+ UNLOCK_MUTEX;
return OK; // Do nothing.
}
if (flushing) {
- mutex.unlock();
+ UNLOCK_MUTEX;
return ERR_BUSY;
}
flushing = true;
uint32_t i = 0;
- uint32_t j = 0;
uint32_t offset = 0;
- while (i < pages_used && j < page_messages[i]) {
+ while (i < pages_used && offset < page_bytes[i]) {
Page *page = pages[i];
//lock on each iteration, so a call can re-add itself to the message queue
@@ -241,7 +298,7 @@ Error CallQueue::flush() {
Object *target = message->callable.get_object();
- mutex.unlock();
+ UNLOCK_MUTEX;
switch (message->type & FLAG_MASK) {
case TYPE_CALL: {
@@ -272,35 +329,32 @@ Error CallQueue::flush() {
message->~Message();
- mutex.lock();
- j++;
- if (j == page_messages[i]) {
- j = 0;
+ LOCK_MUTEX;
+ if (offset == page_bytes[i]) {
i++;
offset = 0;
}
}
- page_messages[0] = 0;
- page_offset = 0;
+ page_bytes[0] = 0;
pages_used = 1;
flushing = false;
- mutex.unlock();
+ UNLOCK_MUTEX;
return OK;
}
void CallQueue::clear() {
- mutex.lock();
+ LOCK_MUTEX;
if (pages.size() == 0) {
- mutex.unlock();
+ UNLOCK_MUTEX;
return; // Nothing to clear.
}
for (uint32_t i = 0; i < pages_used; i++) {
uint32_t offset = 0;
- for (uint32_t j = 0; j < page_messages[i]; j++) {
+ while (offset < page_bytes[i]) {
Page *page = pages[i];
//lock on each iteration, so a call can re-add itself to the message queue
@@ -312,7 +366,6 @@ void CallQueue::clear() {
advance += sizeof(Variant) * message->args;
}
- //pre-advance so this function is reentrant
offset += advance;
if ((message->type & FLAG_MASK) != TYPE_NOTIFICATION) {
@@ -327,14 +380,13 @@ void CallQueue::clear() {
}
pages_used = 1;
- page_offset = 0;
- page_messages[0] = 0;
+ page_bytes[0] = 0;
- mutex.unlock();
+ UNLOCK_MUTEX;
}
void CallQueue::statistics() {
- mutex.lock();
+ LOCK_MUTEX;
HashMap<StringName, int> set_count;
HashMap<int, int> notify_count;
HashMap<Callable, int> call_count;
@@ -342,7 +394,7 @@ void CallQueue::statistics() {
for (uint32_t i = 0; i < pages_used; i++) {
uint32_t offset = 0;
- for (uint32_t j = 0; j < page_messages[i]; j++) {
+ while (offset < page_bytes[i]) {
Page *page = pages[i];
//lock on each iteration, so a call can re-add itself to the message queue
@@ -397,7 +449,6 @@ void CallQueue::statistics() {
null_count++;
}
- //pre-advance so this function is reentrant
offset += advance;
if ((message->type & FLAG_MASK) != TYPE_NOTIFICATION) {
@@ -426,7 +477,7 @@ void CallQueue::statistics() {
print_line("NOTIFY " + itos(E.key) + ": " + itos(E.value));
}
- mutex.unlock();
+ UNLOCK_MUTEX;
}
bool CallQueue::is_flushing() const {
@@ -437,7 +488,7 @@ bool CallQueue::has_messages() const {
if (pages_used == 0) {
return false;
}
- if (pages_used == 1 && page_messages[0] == 0) {
+ if (pages_used == 1 && page_bytes[0] == 0) {
return false;
}
@@ -473,16 +524,21 @@ CallQueue::~CallQueue() {
//////////////////////
-MessageQueue *MessageQueue::singleton = nullptr;
+CallQueue *MessageQueue::main_singleton = nullptr;
+thread_local CallQueue *MessageQueue::thread_singleton = nullptr;
+
+void MessageQueue::set_thread_singleton_override(CallQueue *p_thread_singleton) {
+ thread_singleton = p_thread_singleton;
+}
MessageQueue::MessageQueue() :
CallQueue(nullptr,
int(GLOBAL_DEF_RST(PropertyInfo(Variant::INT, "memory/limits/message_queue/max_size_mb", PROPERTY_HINT_RANGE, "1,512,1,or_greater"), 32)) * 1024 * 1024 / PAGE_SIZE_BYTES,
"Message queue out of memory. Try increasing 'memory/limits/message_queue/max_size_mb' in project settings.") {
- ERR_FAIL_COND_MSG(singleton != nullptr, "A MessageQueue singleton already exists.");
- singleton = this;
+ ERR_FAIL_COND_MSG(main_singleton != nullptr, "A MessageQueue singleton already exists.");
+ main_singleton = this;
}
MessageQueue::~MessageQueue() {
- singleton = nullptr;
+ main_singleton = nullptr;
}
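
Replacing per-page message counts with per-page byte counts (page_bytes) lets every traversal walk messages by byte offset, and lets whole pages be spliced between queues with a plain memcpy. The iteration pattern shared by flush(), clear(), and statistics() above, condensed:

    uint32_t offset = 0;
    while (offset < page_bytes[i]) {
        Message *message = (Message *)&pages[i]->data[offset];
        uint32_t advance = sizeof(Message);
        if ((message->type & FLAG_MASK) != TYPE_NOTIFICATION) {
            advance += sizeof(Variant) * message->args; // trailing Variant arguments
        }
        offset += advance; // advance before dispatch, so a handler can re-enqueue safely
        // ... process *message ...
    }
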
diff --git a/core/object/message_queue.h b/core/object/message_queue.h
index fe261f840e..c6fcccbd58 100644
--- a/core/object/message_queue.h
+++ b/core/object/message_queue.h
@@ -70,10 +70,9 @@ private:
bool allocator_is_custom = false;
LocalVector<Page *> pages;
- LocalVector<uint32_t> page_messages;
+ LocalVector<uint32_t> page_bytes;
uint32_t max_pages = 0;
uint32_t pages_used = 0;
- uint32_t page_offset = 0;
bool flushing = false;
struct Message {
@@ -88,7 +87,7 @@ private:
_FORCE_INLINE_ void _ensure_first_page() {
if (unlikely(pages.is_empty())) {
pages.push_back(allocator->alloc());
- page_messages.push_back(0);
+ page_bytes.push_back(0);
pages_used = 1;
}
}
@@ -153,10 +152,15 @@ public:
};
class MessageQueue : public CallQueue {
- static MessageQueue *singleton;
+ static CallQueue *main_singleton;
+ static thread_local CallQueue *thread_singleton;
+ friend class CallQueue;
public:
- _FORCE_INLINE_ static MessageQueue *get_singleton() { return singleton; }
+ _FORCE_INLINE_ static CallQueue *get_singleton() { return thread_singleton ? thread_singleton : main_singleton; }
+
+ static void set_thread_singleton_override(CallQueue *p_thread_singleton);
+
MessageQueue();
~MessageQueue();
};
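
The header spells out the resolution order: get_singleton() prefers the thread-local override and only then falls back to the main queue, so existing call sites keep working unchanged. For instance (the callable below is a placeholder):

    CallQueue *q = MessageQueue::get_singleton(); // thread_singleton if set, else main_singleton.
    q->push_callable(some_deferred_callable);     // some_deferred_callable: hypothetical.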
diff --git a/core/object/object.h b/core/object/object.h
index c633208d7c..ae22851c15 100644
--- a/core/object/object.h
+++ b/core/object/object.h
@@ -61,7 +61,6 @@ enum PropertyHint {
PROPERTY_HINT_LAYERS_3D_RENDER,
PROPERTY_HINT_LAYERS_3D_PHYSICS,
PROPERTY_HINT_LAYERS_3D_NAVIGATION,
- PROPERTY_HINT_LAYERS_AVOIDANCE,
PROPERTY_HINT_FILE, ///< a file path must be passed, hint_text (optionally) is a filter "*.png,*.wav,*.doc,"
PROPERTY_HINT_DIR, ///< a directory path must be passed
PROPERTY_HINT_GLOBAL_FILE, ///< a file path must be passed, hint_text (optionally) is a filter "*.png,*.wav,*.doc,"
@@ -86,6 +85,7 @@ enum PropertyHint {
PROPERTY_HINT_NODE_TYPE, ///< a node object type
PROPERTY_HINT_HIDE_QUATERNION_EDIT, /// Only Node3D::transform should hide the quaternion editor.
PROPERTY_HINT_PASSWORD,
+ PROPERTY_HINT_LAYERS_AVOIDANCE,
PROPERTY_HINT_MAX,
};
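
PROPERTY_HINT_LAYERS_AVOIDANCE moves from the middle of the enum to just before PROPERTY_HINT_MAX. Since property hints travel as integers through serialization and the scripting API, appending keeps every pre-existing hint's value stable, while the old mid-enum slot had shifted all later entries by one. A hypothetical illustration (names invented):

    enum Hint { HINT_A, HINT_B, HINT_MAX };                    // HINT_B == 1.
    enum HintGrown { HINT_A2, HINT_NEW, HINT_B2, HINT_MAX2 };  // HINT_B2 == 2: stored 1s now misread.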
diff --git a/core/object/worker_thread_pool.cpp b/core/object/worker_thread_pool.cpp
index 721c8d0a10..3dca6b73a6 100644
--- a/core/object/worker_thread_pool.cpp
+++ b/core/object/worker_thread_pool.cpp
@@ -140,9 +140,9 @@ void WorkerThreadPool::_process_task(Task *p_task) {
task_queue.add_last(&low_prio_task->task_elem);
post = true;
} else {
- low_priority_threads_used.decrement();
+ low_priority_threads_used--;
}
- task_mutex.lock();
+ task_mutex.unlock();
if (post) {
task_available_semaphore.post();
}
@@ -152,7 +152,7 @@ void WorkerThreadPool::_process_task(Task *p_task) {
void WorkerThreadPool::_thread_function(void *p_user) {
while (true) {
singleton->task_available_semaphore.wait();
- if (singleton->exit_threads.is_set()) {
+ if (singleton->exit_threads) {
break;
}
singleton->_process_task_queue();
@@ -168,14 +168,13 @@ void WorkerThreadPool::_post_task(Task *p_task, bool p_high_priority) {
task_mutex.lock();
p_task->low_priority = !p_high_priority;
if (!p_high_priority && use_native_low_priority_threads) {
- task_mutex.unlock();
p_task->low_priority_thread = native_thread_allocator.alloc();
+ task_mutex.unlock();
p_task->low_priority_thread->start(_native_low_priority_thread_function, p_task); // Pass task directly to thread.
-
- } else if (p_high_priority || low_priority_threads_used.get() < max_low_priority_threads) {
+ } else if (p_high_priority || low_priority_threads_used < max_low_priority_threads) {
task_queue.add_last(&p_task->task_elem);
if (!p_high_priority) {
- low_priority_threads_used.increment();
+ low_priority_threads_used++;
}
task_mutex.unlock();
task_available_semaphore.post();
@@ -251,6 +250,8 @@ void WorkerThreadPool::wait_for_task_completion(TaskID p_task_id) {
if (use_native_low_priority_threads && task->low_priority) {
task->low_priority_thread->wait_to_finish();
+
+ task_mutex.lock();
native_thread_allocator.free(task->low_priority_thread);
} else {
int *index = thread_ids.getptr(Thread::get_caller_id());
@@ -272,9 +273,10 @@ void WorkerThreadPool::wait_for_task_completion(TaskID p_task_id) {
} else {
task->done_semaphore.wait();
}
+
+ task_mutex.lock();
}
- task_mutex.lock();
tasks.erase(p_task_id);
task_allocator.free(task);
task_mutex.unlock();
@@ -379,8 +381,8 @@ void WorkerThreadPool::wait_for_group_task_completion(GroupID p_group) {
if (group->low_priority_native_tasks.size() > 0) {
for (Task *task : group->low_priority_native_tasks) {
task->low_priority_thread->wait_to_finish();
- native_thread_allocator.free(task->low_priority_thread);
task_mutex.lock();
+ native_thread_allocator.free(task->low_priority_thread);
task_allocator.free(task);
task_mutex.unlock();
}
@@ -443,7 +445,7 @@ void WorkerThreadPool::finish() {
}
task_mutex.unlock();
- exit_threads.set_to(true);
+ exit_threads = true;
for (uint32_t i = 0; i < threads.size(); i++) {
task_available_semaphore.post();
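
Besides dropping the atomic wrappers (see the header diff below), the hunks above tighten lock discipline: _post_task keeps task_mutex held while allocating from native_thread_allocator, and the completion paths re-take the lock before freeing into the shared allocators. Condensed from the hunks, the resulting pattern is: block outside the lock, mutate shared state inside it.

    task->low_priority_thread->wait_to_finish();              // May block: stay outside the lock.
    task_mutex.lock();
    native_thread_allocator.free(task->low_priority_thread); // Shared allocator: lock held.
    tasks.erase(p_task_id);
    task_allocator.free(task);
    task_mutex.unlock();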
diff --git a/core/object/worker_thread_pool.h b/core/object/worker_thread_pool.h
index c62e05fc28..d47c6ad714 100644
--- a/core/object/worker_thread_pool.h
+++ b/core/object/worker_thread_pool.h
@@ -107,7 +107,7 @@ private:
};
TightLocalVector<ThreadData> threads;
- SafeFlag exit_threads;
+ bool exit_threads = false;
HashMap<Thread::ID, int> thread_ids;
HashMap<TaskID, Task *> tasks;
@@ -115,7 +115,7 @@ private:
bool use_native_low_priority_threads = false;
uint32_t max_low_priority_threads = 0;
- SafeNumeric<uint32_t> low_priority_threads_used;
+ uint32_t low_priority_threads_used = 0;
uint64_t last_task = 1;
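
exit_threads and low_priority_threads_used lose their SafeFlag/SafeNumeric wrappers in favor of plain fields. That is sound only while every access stays ordered by task_mutex (or, for exit_threads, by the semaphore handoff in finish()). The general trade-off, as a sketch with hypothetical names:

    #include "core/os/mutex.h"

    Mutex mutex;
    uint32_t counter = 0; // Plain field: every access below holds `mutex`.

    void add_one() {
        MutexLock lock(mutex);
        counter++; // No atomic needed; the lock already orders all accesses.
    }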
diff --git a/core/os/mutex.h b/core/os/mutex.h
index 90cc1632e8..cee0f8af74 100644
--- a/core/os/mutex.h
+++ b/core/os/mutex.h
@@ -119,8 +119,25 @@ class MutexLock {
public:
_ALWAYS_INLINE_ explicit MutexLock(const MutexT &p_mutex) :
+ lock(p_mutex.mutex) {}
+};
+
+// This specialization is needed so manual locking and MutexLock can be used
+// at the same time on a SafeBinaryMutex.
+template <int Tag>
+class MutexLock<SafeBinaryMutex<Tag>> {
+ friend class ConditionVariable;
+
+ std::unique_lock<std::mutex> lock;
+
+public:
+ _ALWAYS_INLINE_ explicit MutexLock(const SafeBinaryMutex<Tag> &p_mutex) :
lock(p_mutex.mutex) {
- }
+ SafeBinaryMutex<Tag>::count++;
+ }
+ _ALWAYS_INLINE_ ~MutexLock() {
+ SafeBinaryMutex<Tag>::count--;
+ }
};
using Mutex = MutexImpl<std::recursive_mutex>; // Recursive, for general use
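
The new specialization exists because SafeBinaryMutex tracks a per-thread lock count, and MutexLock must keep that count honest or manual lock()/unlock() calls inside a MutexLock scope would desynchronize it. Assuming SafeBinaryMutex::lock() only takes the underlying mutex when the count rises from zero (the behavior this specialization pairs with), nesting becomes safe. Tag value and functions below are hypothetical:

    static SafeBinaryMutex<1> mutex;

    void helper() {
        mutex.lock();   // count 1 -> 2; underlying std::mutex untouched.
        // ... shared state ...
        mutex.unlock(); // count 2 -> 1.
    }

    void outer() {
        MutexLock lock(mutex); // Locks for real; count 0 -> 1.
        helper();              // No self-deadlock thanks to the shared count.
    }                          // ~MutexLock: count 1 -> 0.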
diff --git a/core/os/thread.cpp b/core/os/thread.cpp
index 502f82aaef..c067ad1a6a 100644
--- a/core/os/thread.cpp
+++ b/core/os/thread.cpp
@@ -66,11 +66,12 @@ void Thread::callback(ID p_caller_id, const Settings &p_settings, Callback p_cal
}
}
-void Thread::start(Thread::Callback p_callback, void *p_user, const Settings &p_settings) {
- ERR_FAIL_COND_MSG(id != UNASSIGNED_ID, "A Thread object has been re-started without wait_to_finish() having been called on it.");
+Thread::ID Thread::start(Thread::Callback p_callback, void *p_user, const Settings &p_settings) {
+ ERR_FAIL_COND_V_MSG(id != UNASSIGNED_ID, UNASSIGNED_ID, "A Thread object has been re-started without wait_to_finish() having been called on it.");
id = id_counter.increment();
std::thread new_thread(&Thread::callback, id, p_settings, p_callback, p_user);
thread.swap(new_thread);
+ return id;
}
bool Thread::is_started() const {
diff --git a/core/os/thread.h b/core/os/thread.h
index a769bb1df4..3e307adfff 100644
--- a/core/os/thread.h
+++ b/core/os/thread.h
@@ -109,7 +109,7 @@ public:
static Error set_name(const String &p_name);
- void start(Thread::Callback p_callback, void *p_user, const Settings &p_settings = Settings());
+ ID start(Thread::Callback p_callback, void *p_user, const Settings &p_settings = Settings());
bool is_started() const;
///< waits until thread is finished, and deallocates it.
void wait_to_finish();
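
Returning the ID from start() lets the spawning code identify its worker without the thread reporting Thread::get_caller_id() back through shared state. A sketch; the callback and userdata are placeholders:

    void my_worker(void *p_userdata) { /* hypothetical worker body */ }

    Thread worker;
    Thread::ID worker_id = worker.start(&my_worker, nullptr);
    // Elsewhere, identity checks become cheap:
    bool on_worker = (Thread::get_caller_id() == worker_id);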
diff --git a/core/os/time.cpp b/core/os/time.cpp
index 12e6f08525..038e4adc03 100644
--- a/core/os/time.cpp
+++ b/core/os/time.cpp
@@ -382,10 +382,10 @@ String Time::get_time_string_from_system(bool p_utc) const {
Dictionary Time::get_time_zone_from_system() const {
OS::TimeZoneInfo info = OS::get_singleton()->get_time_zone_info();
- Dictionary timezone;
- timezone["bias"] = info.bias;
- timezone["name"] = info.name;
- return timezone;
+ Dictionary ret_timezone;
+ ret_timezone["bias"] = info.bias;
+ ret_timezone["name"] = info.name;
+ return ret_timezone;
}
double Time::get_unix_time_from_system() const {
diff --git a/core/register_core_types.cpp b/core/register_core_types.cpp
index a374e7c009..b8b8119618 100644
--- a/core/register_core_types.cpp
+++ b/core/register_core_types.cpp
@@ -302,15 +302,9 @@ void register_core_settings() {
GLOBAL_DEF_RST(PropertyInfo(Variant::INT, "network/limits/packet_peer_stream/max_buffer_po2", PROPERTY_HINT_RANGE, "0,64,1,or_greater"), (16));
GLOBAL_DEF(PropertyInfo(Variant::STRING, "network/tls/certificate_bundle_override", PROPERTY_HINT_FILE, "*.crt"), "");
- int worker_threads = GLOBAL_DEF("threading/worker_pool/max_threads", -1);
- bool low_priority_use_system_threads = GLOBAL_DEF("threading/worker_pool/use_system_threads_for_low_priority_tasks", true);
- float low_property_ratio = GLOBAL_DEF("threading/worker_pool/low_priority_thread_ratio", 0.3);
-
- if (Engine::get_singleton()->is_editor_hint() || Engine::get_singleton()->is_project_manager_hint()) {
- worker_thread_pool->init();
- } else {
- worker_thread_pool->init(worker_threads, low_priority_use_system_threads, low_property_ratio);
- }
+ GLOBAL_DEF("threading/worker_pool/max_threads", -1);
+ GLOBAL_DEF("threading/worker_pool/use_system_threads_for_low_priority_tasks", true);
+ GLOBAL_DEF("threading/worker_pool/low_priority_thread_ratio", 0.3);
}
void register_core_singletons() {
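
register_core_settings() now only declares the three worker-pool settings; the init call that consumed them is gone from this function, so the pool must be initialized later, at a point where the settings are readable (that call site is outside this diff). A hypothetical sketch of such a site:

    // Hypothetical init site, not shown in this diff:
    int worker_threads = GLOBAL_GET("threading/worker_pool/max_threads");
    bool low_prio_sys = GLOBAL_GET("threading/worker_pool/use_system_threads_for_low_priority_tasks");
    float low_prio_ratio = GLOBAL_GET("threading/worker_pool/low_priority_thread_ratio");
    worker_thread_pool->init(worker_threads, low_prio_sys, low_prio_ratio);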
diff --git a/core/variant/variant_call.cpp b/core/variant/variant_call.cpp
index e22970ef5c..0a836c125a 100644
--- a/core/variant/variant_call.cpp
+++ b/core/variant/variant_call.cpp
@@ -2002,7 +2002,6 @@ static void _register_variant_builtin_methods() {
bind_static_method(Color, html_is_valid, sarray("color"), varray());
bind_static_method(Color, from_string, sarray("str", "default"), varray());
bind_static_method(Color, from_hsv, sarray("h", "s", "v", "alpha"), varray(1.0));
- bind_static_method(Color, from_hsl, sarray("h", "s", "l", "alpha"), varray(1.0));
bind_static_method(Color, from_ok_hsl, sarray("h", "s", "l", "alpha"), varray(1.0));
bind_static_method(Color, from_rgbe9995, sarray("rgbe"), varray());
diff --git a/core/variant/variant_setget.cpp b/core/variant/variant_setget.cpp
index ce035f5f7a..30fb5d0e9f 100644
--- a/core/variant/variant_setget.cpp
+++ b/core/variant/variant_setget.cpp
@@ -139,10 +139,6 @@ void register_named_setters_getters() {
REGISTER_MEMBER(Color, h);
REGISTER_MEMBER(Color, s);
REGISTER_MEMBER(Color, v);
-
- REGISTER_MEMBER(Color, hsl_h);
- REGISTER_MEMBER(Color, hsl_s);
- REGISTER_MEMBER(Color, hsl_l);
}
void unregister_named_setters_getters() {
diff --git a/core/variant/variant_setget.h b/core/variant/variant_setget.h
index db6e273817..176967344f 100644
--- a/core/variant/variant_setget.h
+++ b/core/variant/variant_setget.h
@@ -344,10 +344,6 @@ SETGET_NUMBER_STRUCT_FUNC(Color, double, h, set_h, get_h)
SETGET_NUMBER_STRUCT_FUNC(Color, double, s, set_s, get_s)
SETGET_NUMBER_STRUCT_FUNC(Color, double, v, set_v, get_v)
-SETGET_NUMBER_STRUCT_FUNC(Color, double, hsl_h, set_hsl_h, get_hsl_h)
-SETGET_NUMBER_STRUCT_FUNC(Color, double, hsl_s, set_hsl_s, get_hsl_s)
-SETGET_NUMBER_STRUCT_FUNC(Color, double, hsl_l, set_hsl_l, get_hsl_l)
-
SETGET_NUMBER_STRUCT_FUNC(Color, double, ok_hsl_h, set_ok_hsl_h, get_ok_hsl_h)
SETGET_NUMBER_STRUCT_FUNC(Color, double, ok_hsl_s, set_ok_hsl_s, get_ok_hsl_s)
SETGET_NUMBER_STRUCT_FUNC(Color, double, ok_hsl_l, set_ok_hsl_l, get_ok_hsl_l)
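
The three diffs above unbind the classic-HSL surface (Color.from_hsl plus the hsl_h/hsl_s/hsl_l members), leaving the OKHSL variants as the bound lightness-based API. Assuming the C++ signature defaults alpha to 1.0, as the removed binding's varray(1.0) suggests:

    Color c = Color::from_ok_hsl(0.6, 0.5, 0.5); // h, s, l; alpha defaults to 1.0.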