Diffstat (limited to 'core/io/resource_loader.cpp')
-rw-r--r--  core/io/resource_loader.cpp | 145
1 file changed, 98 insertions(+), 47 deletions(-)
diff --git a/core/io/resource_loader.cpp b/core/io/resource_loader.cpp
index c3c37aa89d..ed5e482296 100644
--- a/core/io/resource_loader.cpp
+++ b/core/io/resource_loader.cpp
@@ -272,6 +272,7 @@ Ref<Resource> ResourceLoader::_load(const String &p_path, const String &p_origin
}
load_paths_stack->resize(load_paths_stack->size() - 1);
+ res_ref_overrides.erase(load_nesting);
load_nesting--;
if (!res.is_null()) {
@@ -302,7 +303,8 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
thread_load_mutex.unlock();
// Thread-safe either if it's the current thread or a brand new one.
- CallQueue *mq_override = nullptr;
+ bool mq_override_present = false;
+ CallQueue *own_mq_override = nullptr;
if (load_nesting == 0) {
load_paths_stack = memnew(Vector<String>);
@@ -310,8 +312,12 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
load_paths_stack->push_back(load_task.dependent_path);
}
if (!Thread::is_main_thread()) {
- mq_override = memnew(CallQueue);
- MessageQueue::set_thread_singleton_override(mq_override);
+ // Let the caller thread use its own message queue if it has one, for added flexibility. Provide one otherwise.
+ if (MessageQueue::get_singleton() == MessageQueue::get_main_singleton()) {
+ own_mq_override = memnew(CallQueue);
+ MessageQueue::set_thread_singleton_override(own_mq_override);
+ }
+ mq_override_present = true;
set_current_thread_safe_for_nodes(true);
}
} else {
@@ -324,8 +330,8 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
}
Ref<Resource> res = _load(load_task.remapped_path, load_task.remapped_path != load_task.local_path ? load_task.local_path : String(), load_task.type_hint, load_task.cache_mode, &load_task.error, load_task.use_sub_threads, &load_task.progress);
- if (mq_override) {
- mq_override->flush();
+ if (mq_override_present) {
+ MessageQueue::get_singleton()->flush();
}
thread_load_mutex.lock();
@@ -394,8 +400,9 @@ void ResourceLoader::_thread_load_function(void *p_userdata) {
thread_load_mutex.unlock();
if (load_nesting == 0) {
- if (mq_override) {
- memdelete(mq_override);
+ if (own_mq_override) {
+ MessageQueue::set_thread_singleton_override(nullptr);
+ memdelete(own_mq_override);
}
memdelete(load_paths_stack);
}
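
Taken together, the three message-queue hunks above change the policy from "always allocate an override off the main thread" to "reuse the caller's override when present, flush whichever one is in effect, and tear down only what we created". A minimal self-contained sketch of that lifecycle, using plain stand-ins rather than the engine's CallQueue/MessageQueue API:

```cpp
#include <cassert>

struct Queue { // stand-in for CallQueue
    void flush() { /* drain pending deferred calls */ }
};

static Queue main_queue;                           // stand-in for the main MessageQueue
thread_local Queue *effective_queue = &main_queue; // per-thread singleton override slot

void thread_load() { // runs off the main thread, at load_nesting == 0
    Queue *own_queue = nullptr;    // mirrors own_mq_override
    bool queue_overridden = false; // mirrors mq_override_present

    if (effective_queue == &main_queue) {
        // The caller didn't install its own queue; provide one for this load.
        own_queue = new Queue;
        effective_queue = own_queue;
    }
    queue_overridden = true; // ours or the caller's is now in effect

    // ... perform the load ...

    if (queue_overridden) {
        effective_queue->flush(); // flush whichever override is in effect
    }

    if (own_queue) {
        // Tear down only what we created; a caller-provided override survives.
        effective_queue = &main_queue; // the real code clears via set_thread_singleton_override(nullptr)
        delete own_queue;
    }
}

int main() {
    thread_load();
    assert(effective_queue == &main_queue);
}
```
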
@@ -457,25 +464,23 @@ Ref<Resource> ResourceLoader::load(const String &p_path, const String &p_type_hi
Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path, const String &p_type_hint, LoadThreadMode p_thread_mode, ResourceFormatLoader::CacheMode p_cache_mode) {
String local_path = _validate_local_path(p_path);
+ bool ignoring_cache = p_cache_mode == ResourceFormatLoader::CACHE_MODE_IGNORE || p_cache_mode == ResourceFormatLoader::CACHE_MODE_IGNORE_DEEP;
+
Ref<LoadToken> load_token;
- bool must_not_register = false;
ThreadLoadTask unregistered_load_task; // Once set, must be valid up to the call to do the load.
ThreadLoadTask *load_task_ptr = nullptr;
bool run_on_current_thread = false;
{
MutexLock thread_load_lock(thread_load_mutex);
- if (thread_load_tasks.has(local_path)) {
+ if (!ignoring_cache && thread_load_tasks.has(local_path)) {
load_token = Ref<LoadToken>(thread_load_tasks[local_path].load_token);
if (!load_token.is_valid()) {
// The token is dying (reached 0 on another thread).
// Ensure it's killed now so the path can be safely reused right away.
thread_load_tasks[local_path].load_token->clear();
- } else {
- if (p_cache_mode != ResourceFormatLoader::CACHE_MODE_IGNORE) {
- return load_token;
- }
}
+ }
+ return load_token;
}
load_token.instantiate();
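
The new ignoring_cache flag folds both ignore modes into one up-front decision, so a cache-ignoring request never attaches itself to an in-flight load of the same path (previously only CACHE_MODE_IGNORE was special-cased, and only after the map lookup). A tiny standalone version of the predicate, with the CacheMode enum abbreviated for illustration:

```cpp
// Abbreviated stand-in for ResourceFormatLoader::CacheMode.
enum CacheMode { CACHE_MODE_REUSE, CACHE_MODE_REPLACE, CACHE_MODE_IGNORE, CACHE_MODE_IGNORE_DEEP };

// Both ignore variants bypass the shared task map entirely: the task is
// neither reused from it nor registered into it.
bool is_ignoring_cache(CacheMode m) {
    return m == CACHE_MODE_IGNORE || m == CACHE_MODE_IGNORE_DEEP;
}
```
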
@@ -503,19 +508,19 @@ Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path,
}
}
- // If we want to ignore cache, but there's another task loading it, we can't add this one to the map and we also have to finish unconditionally synchronously.
- must_not_register = thread_load_tasks.has(local_path) && p_cache_mode == ResourceFormatLoader::CACHE_MODE_IGNORE;
- if (must_not_register) {
+ // Cache-ignoring tasks aren't registered in the map, so they must finish within this call's scope.
+ if (ignoring_cache) {
load_token->local_path.clear();
unregistered_load_task = load_task;
+ load_task_ptr = &unregistered_load_task;
} else {
- thread_load_tasks[local_path] = load_task;
+ DEV_ASSERT(!thread_load_tasks.has(local_path));
+ HashMap<String, ResourceLoader::ThreadLoadTask>::Iterator E = thread_load_tasks.insert(local_path, load_task);
+ load_task_ptr = &E->value;
}
-
- load_task_ptr = must_not_register ? &unregistered_load_task : &thread_load_tasks[local_path];
}
- run_on_current_thread = must_not_register || p_thread_mode == LOAD_THREAD_FROM_CURRENT;
+ run_on_current_thread = ignoring_cache || p_thread_mode == LOAD_THREAD_FROM_CURRENT;
if (run_on_current_thread) {
load_task_ptr->thread_id = Thread::get_caller_id();
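
The registered branch now keeps the iterator returned by insert() instead of re-looking the task up with operator[]. A standalone illustration with std::unordered_map standing in for the engine's HashMap (the point being that a pointer to the stored value stays valid while the element exists, which is what makes handing out load_task_ptr safe):

```cpp
#include <cassert>
#include <string>
#include <unordered_map>

struct Task {
    float progress = 0.0f;
};

std::unordered_map<std::string, Task> tasks;

Task *register_task(const std::string &path, const Task &t) {
    assert(tasks.find(path) == tasks.end()); // mirrors the DEV_ASSERT above
    auto [it, inserted] = tasks.emplace(path, t); // one lookup: insert and keep the slot
    return &it->second; // stable address: unordered_map never moves stored elements
}

int main() {
    Task *ptr = register_task("res://icon.svg", Task{});
    ptr->progress = 0.5f; // later code mutates the registered task in place
}
```
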
@@ -526,7 +531,7 @@ Ref<ResourceLoader::LoadToken> ResourceLoader::_load_start(const String &p_path,
if (run_on_current_thread) {
_thread_load_function(load_task_ptr);
- if (must_not_register) {
+ if (ignoring_cache) {
load_token->res_if_unregistered = load_task_ptr->resource;
}
}
@@ -656,39 +661,50 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
return Ref<Resource>();
}
- if (load_task.task_id != 0) {
+ bool loader_is_wtp = load_task.task_id != 0;
+ Error wtp_task_err = FAILED;
+ if (loader_is_wtp) {
// Loading thread is in the worker pool.
thread_load_mutex.unlock();
- Error err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
- if (err == ERR_BUSY) {
- // The WorkerThreadPool has reported that the current task wants to await on an older one.
- // That't not allowed for safety, to avoid deadlocks. Fortunately, though, in the context of
- // resource loading that means that the task to wait for can be restarted here to break the
- // cycle, with as much recursion into this process as needed.
- // When the stack is eventually unrolled, the original load will have been notified to go on.
- // CACHE_MODE_IGNORE is needed because, otherwise, the new request would just see there's
- // an ongoing load for that resource and wait for it again. This value forces a new load.
- Ref<ResourceLoader::LoadToken> token = _load_start(load_task.local_path, load_task.type_hint, LOAD_THREAD_DISTRIBUTE, ResourceFormatLoader::CACHE_MODE_IGNORE);
- Ref<Resource> resource = _load_complete(*token.ptr(), &err);
- if (r_error) {
- *r_error = err;
+ wtp_task_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
+ }
+
+ if (load_task.status == THREAD_LOAD_IN_PROGRESS) { // If the load errored out early, awaiting would deadlock.
+ if (loader_is_wtp) {
+ if (wtp_task_err == ERR_BUSY) {
+ // The WorkerThreadPool has reported that the current task wants to await on an older one.
+ // That's not allowed for safety, to avoid deadlocks. Fortunately, though, in the context of
+ // resource loading that means that the task to wait for can be restarted here to break the
+ // cycle, with as much recursion into this process as needed.
+ // When the stack is eventually unrolled, the original load will have been notified to go on.
+ // CACHE_MODE_IGNORE is needed because, otherwise, the new request would just see there's
+ // an ongoing load for that resource and wait for it again. This value forces a new load.
+ Ref<ResourceLoader::LoadToken> token = _load_start(load_task.local_path, load_task.type_hint, LOAD_THREAD_DISTRIBUTE, ResourceFormatLoader::CACHE_MODE_IGNORE);
+ Ref<Resource> resource = _load_complete(*token.ptr(), &wtp_task_err);
+ if (r_error) {
+ *r_error = wtp_task_err;
+ }
+ thread_load_mutex.lock();
+ return resource;
+ } else {
+ DEV_ASSERT(wtp_task_err == OK);
+ thread_load_mutex.lock();
+ load_task.awaited = true;
}
- thread_load_mutex.lock();
- return resource;
} else {
- DEV_ASSERT(err == OK);
- thread_load_mutex.lock();
- load_task.awaited = true;
+ // Loading thread is main or user thread.
+ if (!load_task.cond_var) {
+ load_task.cond_var = memnew(ConditionVariable);
+ }
+ do {
+ load_task.cond_var->wait(p_thread_load_lock);
+ DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
+ } while (load_task.cond_var);
}
} else {
- // Loading thread is main or user thread.
- if (!load_task.cond_var) {
- load_task.cond_var = memnew(ConditionVariable);
+ if (loader_is_wtp) {
+ thread_load_mutex.lock();
}
- do {
- load_task.cond_var->wait(p_thread_load_lock);
- DEV_ASSERT(thread_load_tasks.has(p_load_token.local_path) && p_load_token.get_reference_count());
- } while (load_task.cond_var);
}
}
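
The rewritten wait logic now distinguishes three cases up front: a WorkerThreadPool task that reports ERR_BUSY (re-run the load here, with CACHE_MODE_IGNORE, to break the wait cycle), a pool task that can be awaited normally, and a load owned by the main or a user thread, which blocks on a condition variable until the loading thread clears it. The do/while on cond_var re-checks after every wakeup; a self-contained sketch of that last pattern, using std primitives as stand-ins:

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex load_mutex;                // stands in for thread_load_mutex
std::condition_variable load_done_cv; // stands in for load_task.cond_var
bool load_done = false;               // stands in for "cond_var was nulled"

void wait_for_load() {
    std::unique_lock<std::mutex> lock(load_mutex);
    while (!load_done) { // guard against spurious wakeups, like the do/while above
        load_done_cv.wait(lock);
    }
}

int main() {
    std::thread waiter(wait_for_load);
    {
        std::lock_guard<std::mutex> lock(load_mutex);
        load_done = true; // the loading thread signals completion
    }
    load_done_cv.notify_all();
    waiter.join();
}
```
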
@@ -715,6 +731,40 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
}
}
+Ref<Resource> ResourceLoader::ensure_resource_ref_override_for_outer_load(const String &p_path, const String &p_res_type) {
+ ERR_FAIL_COND_V(load_nesting == 0, Ref<Resource>()); // It makes no sense to use this from nesting level 0.
+ const String &local_path = _validate_local_path(p_path);
+ HashMap<String, Ref<Resource>> &overrides = res_ref_overrides[load_nesting - 1];
+ HashMap<String, Ref<Resource>>::Iterator E = overrides.find(local_path);
+ if (E) {
+ return E->value;
+ } else {
+ Object *obj = ClassDB::instantiate(p_res_type);
+ ERR_FAIL_NULL_V(obj, Ref<Resource>());
+ Ref<Resource> res(obj);
+ if (!res.is_valid()) {
+ memdelete(obj);
+ ERR_FAIL_V(Ref<Resource>());
+ }
+ overrides[local_path] = res;
+ return res;
+ }
+}
+
+Ref<Resource> ResourceLoader::get_resource_ref_override(const String &p_path) {
+ DEV_ASSERT(p_path == _validate_local_path(p_path));
+ HashMap<int, HashMap<String, Ref<Resource>>>::Iterator E = res_ref_overrides.find(load_nesting);
+ if (!E) {
+ return nullptr;
+ }
+ HashMap<String, Ref<Resource>>::Iterator F = E->value.find(p_path);
+ if (!F) {
+ return nullptr;
+ }
+
+ return F->value;
+}
+
bool ResourceLoader::exists(const String &p_path, const String &p_type_hint) {
String local_path = _validate_local_path(p_path);
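
The two new functions above implement a per-thread, per-nesting-level "find or create" table of resource instances: an inner load can hand the outer load a pre-made Ref for a path, and _load() (first hunk) erases each level's entries as the stack unwinds. A standalone sketch of the lookup pattern, with std:: containers and a hypothetical make_resource() factory standing in for ClassDB::instantiate():

```cpp
#include <memory>
#include <string>
#include <unordered_map>

struct Resource {};
using ResourceRef = std::shared_ptr<Resource>;

thread_local int load_nesting = 1; // only meaningful inside a nested load (> 0)
thread_local std::unordered_map<int, std::unordered_map<std::string, ResourceRef>> res_ref_overrides;

ResourceRef make_resource() { // hypothetical factory; the real code instantiates via ClassDB
    return std::make_shared<Resource>();
}

ResourceRef ensure_override(const std::string &local_path) {
    // Note the -1: the override is stored at the *outer* load's nesting level,
    // the level whose get_resource_ref_override() lookup must find it.
    auto &level = res_ref_overrides[load_nesting - 1];
    auto it = level.find(local_path);
    if (it != level.end()) {
        return it->second; // reuse the instance created earlier at this level
    }
    ResourceRef res = make_resource();
    level[local_path] = res;
    return res;
}
```
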
@@ -1207,6 +1257,7 @@ bool ResourceLoader::timestamp_on_load = false;
thread_local int ResourceLoader::load_nesting = 0;
thread_local WorkerThreadPool::TaskID ResourceLoader::caller_task_id = 0;
thread_local Vector<String> *ResourceLoader::load_paths_stack;
+thread_local HashMap<int, HashMap<String, Ref<Resource>>> ResourceLoader::res_ref_overrides;
template <>
thread_local uint32_t SafeBinaryMutex<ResourceLoader::BINARY_MUTEX_TAG>::count = 0;
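
All of this bookkeeping is declared thread_local, so each loading thread gets its own nesting counter, paths stack, and override table with no locking. A quick self-contained demonstration of that property:

```cpp
#include <cassert>
#include <thread>

thread_local int load_nesting = 0;

void fake_load() {
    ++load_nesting; // only this thread's copy moves
    assert(load_nesting == 1);
    --load_nesting;
}

int main() {
    load_nesting = 5;         // the main thread's copy
    std::thread t(fake_load); // the worker starts from its own zero-initialized copy
    t.join();
    assert(load_nesting == 5); // unchanged by the worker
}
```
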