Diffstat (limited to 'core')
-rw-r--r--  core/error/error_list.h         |  1
-rw-r--r--  core/io/file_access.cpp         |  8
-rw-r--r--  core/io/ip.cpp                  | 33
-rw-r--r--  core/io/resource_importer.cpp   | 17
-rw-r--r--  core/io/resource_importer.h     |  1
-rw-r--r--  core/io/resource_loader.cpp     | 76
-rw-r--r--  core/os/mutex.h                 | 17
-rw-r--r--  core/os/safe_binary_mutex.h     | 20
-rw-r--r--  core/string/node_path.cpp       | 10
-rw-r--r--  core/templates/paged_allocator.h| 24
-rw-r--r--  core/templates/rid_owner.h      | 40
-rw-r--r--  core/templates/sort_array.h     |  6
12 files changed, 156 insertions(+), 97 deletions(-)
diff --git a/core/error/error_list.h b/core/error/error_list.h
index abc637106a..cdf06eb06d 100644
--- a/core/error/error_list.h
+++ b/core/error/error_list.h
@@ -41,6 +41,7 @@
* - Are added to the Error enum in core/error/error_list.h
* - Have a description added to error_names in core/error/error_list.cpp
* - Are bound with BIND_CORE_ENUM_CONSTANT() in core/core_constants.cpp
+ * - Have a matching Android version in platform/android/java/lib/src/org/godotengine/godot/error/Error.kt
*/
enum Error {
diff --git a/core/io/file_access.cpp b/core/io/file_access.cpp
index 1cf388b33a..c857d54925 100644
--- a/core/io/file_access.cpp
+++ b/core/io/file_access.cpp
@@ -59,11 +59,9 @@ bool FileAccess::exists(const String &p_name) {
return true;
}
- Ref<FileAccess> f = open(p_name, READ);
- if (f.is_null()) {
- return false;
- }
- return true;
+ // Using file_exists because it's faster than trying to open the file.
+ Ref<FileAccess> ret = create_for_path(p_name);
+ return ret->file_exists(p_name);
}
void FileAccess::_set_access_type(AccessType p_access) {
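[Editor's sketch] The file_access.cpp hunk swaps "open the file and see if it worked" for the backend's file_exists() query, which avoids paying for an open/close round trip. A minimal standalone illustration of the same trade-off using only the C++ standard library (std::filesystem stands in for Godot's FileAccess; the path is a placeholder):

#include <cstdio>
#include <filesystem>

bool exists_by_open(const char *path) {
	// Slower: creates and destroys a file handle just to answer yes/no.
	if (std::FILE *f = std::fopen(path, "rb")) {
		std::fclose(f);
		return true;
	}
	return false;
}

bool exists_by_stat(const char *path) {
	// Faster: a single metadata query, no handle is ever opened.
	return std::filesystem::exists(path);
}

int main() {
	const char *path = "project.godot"; // hypothetical path
	std::printf("open: %d, stat: %d\n", exists_by_open(path), exists_by_stat(path));
}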
diff --git a/core/io/ip.cpp b/core/io/ip.cpp
index f20d65bef9..38c71b19fa 100644
--- a/core/io/ip.cpp
+++ b/core/io/ip.cpp
@@ -81,17 +81,17 @@ struct _IP_ResolverPrivate {
continue;
}
- mutex.lock();
+ MutexLock lock(mutex);
List<IPAddress> response;
String hostname = queue[i].hostname;
IP::Type type = queue[i].type;
- mutex.unlock();
+ lock.temp_unlock();
// We should not lock while resolving the hostname,
// only when modifying the queue.
IP::get_singleton()->_resolve_hostname(response, hostname, type);
- MutexLock lock(mutex);
+ lock.temp_relock();
// Could have been completed by another function, or deleted.
if (queue[i].status.get() != IP::RESOLVER_STATUS_WAITING) {
continue;
@@ -131,21 +131,22 @@ PackedStringArray IP::resolve_hostname_addresses(const String &p_hostname, Type
List<IPAddress> res;
String key = _IP_ResolverPrivate::get_cache_key(p_hostname, p_type);
- resolver->mutex.lock();
- if (resolver->cache.has(key)) {
- res = resolver->cache[key];
- } else {
- // This should be run unlocked so the resolver thread can keep resolving
- // other requests.
- resolver->mutex.unlock();
- _resolve_hostname(res, p_hostname, p_type);
- resolver->mutex.lock();
- // We might be overriding another result, but we don't care as long as the result is valid.
- if (res.size()) {
- resolver->cache[key] = res;
+ {
+ MutexLock lock(resolver->mutex);
+ if (resolver->cache.has(key)) {
+ res = resolver->cache[key];
+ } else {
+ // This should be run unlocked so the resolver thread can keep resolving
+ // other requests.
+ lock.temp_unlock();
+ _resolve_hostname(res, p_hostname, p_type);
+ lock.temp_relock();
+ // We might be overriding another result, but we don't care as long as the result is valid.
+ if (res.size()) {
+ resolver->cache[key] = res;
+ }
}
}
- resolver->mutex.unlock();
PackedStringArray result;
for (const IPAddress &E : res) {
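[Editor's sketch] The ip.cpp hunks replace manual lock()/unlock() pairs with a scoped MutexLock that can be temporarily released around the blocking DNS resolve, so the resolver thread keeps making progress and the lock is still released on every exit path. A reduced version of the pattern with std::unique_lock (resolve_blocking and the cache map are illustrative names, not Godot API):

#include <map>
#include <mutex>
#include <string>
#include <vector>

std::mutex cache_mutex;
std::map<std::string, std::vector<std::string>> cache; // guarded by cache_mutex

// Placeholder for the slow, blocking hostname resolution.
std::vector<std::string> resolve_blocking(const std::string &hostname) {
	return { "127.0.0.1" };
}

std::vector<std::string> resolve_cached(const std::string &hostname) {
	std::vector<std::string> result;
	std::unique_lock<std::mutex> lock(cache_mutex); // plays the role of MutexLock
	auto it = cache.find(hostname);
	if (it != cache.end()) {
		result = it->second;
	} else {
		lock.unlock(); // temp_unlock(): never hold the lock while resolving
		result = resolve_blocking(hostname);
		lock.lock(); // temp_relock(): reacquire before touching the cache
		if (!result.empty()) {
			cache[hostname] = result;
		}
	}
	return result; // the destructor releases cache_mutex on every path
}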
diff --git a/core/io/resource_importer.cpp b/core/io/resource_importer.cpp
index 9e6f3ba314..b4c43abe00 100644
--- a/core/io/resource_importer.cpp
+++ b/core/io/resource_importer.cpp
@@ -364,6 +364,23 @@ ResourceUID::ID ResourceFormatImporter::get_resource_uid(const String &p_path) c
return pat.uid;
}
+Error ResourceFormatImporter::get_resource_import_info(const String &p_path, StringName &r_type, ResourceUID::ID &r_uid, String &r_import_group_file) const {
+ PathAndType pat;
+ Error err = _get_path_and_type(p_path, pat);
+
+ if (err == OK) {
+ r_type = pat.type;
+ r_uid = pat.uid;
+ r_import_group_file = pat.group_file;
+ } else {
+ r_type = "";
+ r_uid = ResourceUID::INVALID_ID;
+ r_import_group_file = "";
+ }
+
+ return err;
+}
+
Variant ResourceFormatImporter::get_resource_metadata(const String &p_path) const {
PathAndType pat;
Error err = _get_path_and_type(p_path, pat);
diff --git a/core/io/resource_importer.h b/core/io/resource_importer.h
index dbd9e70d16..7b1806c3d2 100644
--- a/core/io/resource_importer.h
+++ b/core/io/resource_importer.h
@@ -93,6 +93,7 @@ public:
String get_import_settings_hash() const;
String get_import_base_path(const String &p_for_file) const;
+ Error get_resource_import_info(const String &p_path, StringName &r_type, ResourceUID::ID &r_uid, String &r_import_group_file) const;
ResourceFormatImporter();
};
diff --git a/core/io/resource_loader.cpp b/core/io/resource_loader.cpp
index eaee849a87..d2c4668d12 100644
--- a/core/io/resource_loader.cpp
+++ b/core/io/resource_loader.cpp
@@ -690,10 +690,10 @@ Ref<Resource> ResourceLoader::load_threaded_get(const String &p_path, Error *r_e
if (Thread::is_main_thread() && !load_token->local_path.is_empty()) {
const ThreadLoadTask &load_task = thread_load_tasks[load_token->local_path];
while (load_task.status == THREAD_LOAD_IN_PROGRESS) {
- thread_load_lock.~MutexLock();
+ thread_load_lock.temp_unlock();
bool exit = !_ensure_load_progress();
OS::get_singleton()->delay_usec(1000);
- new (&thread_load_lock) MutexLock(thread_load_mutex);
+ thread_load_lock.temp_relock();
if (exit) {
break;
}
@@ -754,7 +754,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
bool loader_is_wtp = load_task.task_id != 0;
if (loader_is_wtp) {
// Loading thread is in the worker pool.
- thread_load_mutex.unlock();
+ p_thread_load_lock.temp_unlock();
PREPARE_FOR_WTP_WAIT
Error wait_err = WorkerThreadPool::get_singleton()->wait_for_task_completion(load_task.task_id);
@@ -770,7 +770,7 @@ Ref<Resource> ResourceLoader::_load_complete_inner(LoadToken &p_load_token, Erro
_run_load_task(&load_task);
}
- thread_load_mutex.lock();
+ p_thread_load_lock.temp_relock();
load_task.awaited = true;
DEV_ASSERT(load_task.status == THREAD_LOAD_FAILED || load_task.status == THREAD_LOAD_LOADED);
@@ -1104,36 +1104,39 @@ String ResourceLoader::_path_remap(const String &p_path, bool *r_translation_rem
new_path = path_remaps[new_path];
} else {
// Try file remap.
- Error err;
- Ref<FileAccess> f = FileAccess::open(new_path + ".remap", FileAccess::READ, &err);
- if (f.is_valid()) {
- VariantParser::StreamFile stream;
- stream.f = f;
-
- String assign;
- Variant value;
- VariantParser::Tag next_tag;
-
- int lines = 0;
- String error_text;
- while (true) {
- assign = Variant();
- next_tag.fields.clear();
- next_tag.name = String();
-
- err = VariantParser::parse_tag_assign_eof(&stream, lines, error_text, next_tag, assign, value, nullptr, true);
- if (err == ERR_FILE_EOF) {
- break;
- } else if (err != OK) {
- ERR_PRINT("Parse error: " + p_path + ".remap:" + itos(lines) + " error: " + error_text + ".");
- break;
- }
+ // Usually, there's no remap file and FileAccess::exists() is faster than FileAccess::open().
+ if (FileAccess::exists(new_path + ".remap")) {
+ Error err;
+ Ref<FileAccess> f = FileAccess::open(new_path + ".remap", FileAccess::READ, &err);
+ if (f.is_valid()) {
+ VariantParser::StreamFile stream;
+ stream.f = f;
+
+ String assign;
+ Variant value;
+ VariantParser::Tag next_tag;
+
+ int lines = 0;
+ String error_text;
+ while (true) {
+ assign = Variant();
+ next_tag.fields.clear();
+ next_tag.name = String();
+
+ err = VariantParser::parse_tag_assign_eof(&stream, lines, error_text, next_tag, assign, value, nullptr, true);
+ if (err == ERR_FILE_EOF) {
+ break;
+ } else if (err != OK) {
+ ERR_PRINT("Parse error: " + p_path + ".remap:" + itos(lines) + " error: " + error_text + ".");
+ break;
+ }
- if (assign == "path") {
- new_path = value;
- break;
- } else if (next_tag.name != "remap") {
- break;
+ if (assign == "path") {
+ new_path = value;
+ break;
+ } else if (next_tag.name != "remap") {
+ break;
+ }
}
}
}
@@ -1205,7 +1208,7 @@ void ResourceLoader::clear_translation_remaps() {
void ResourceLoader::clear_thread_load_tasks() {
// Bring the thing down as quickly as possible without causing deadlocks or leaks.
- thread_load_mutex.lock();
+ MutexLock thread_load_lock(thread_load_mutex);
cleaning_tasks = true;
while (true) {
@@ -1224,9 +1227,9 @@ void ResourceLoader::clear_thread_load_tasks() {
if (none_running) {
break;
}
- thread_load_mutex.unlock();
+ thread_load_lock.temp_unlock();
OS::get_singleton()->delay_usec(1000);
- thread_load_mutex.lock();
+ thread_load_lock.temp_relock();
}
while (user_load_tokens.begin()) {
@@ -1241,7 +1244,6 @@ void ResourceLoader::clear_thread_load_tasks() {
thread_load_tasks.clear();
cleaning_tasks = false;
- thread_load_mutex.unlock();
}
void ResourceLoader::load_path_remaps() {
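[Editor's sketch] The resource_loader.cpp changes drop the explicit destructor/placement-new dance on the lock and the manual unlock at the end of clear_thread_load_tasks(); temp_unlock()/temp_relock() keep the RAII guard alive across the waiting loops. The shape of such a wait loop, sketched with std::unique_lock and a plain flag guarded by the mutex (names are illustrative):

#include <chrono>
#include <mutex>
#include <thread>

std::mutex state_mutex;
bool task_done = false; // guarded by state_mutex

void wait_for_task() {
	std::unique_lock<std::mutex> lock(state_mutex);
	while (!task_done) {
		lock.unlock(); // release while sleeping so worker threads can update the state
		std::this_thread::sleep_for(std::chrono::milliseconds(1));
		lock.lock(); // reacquire before re-reading task_done
	}
	// state_mutex is still held here; the destructor releases it on return.
}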
diff --git a/core/os/mutex.h b/core/os/mutex.h
index 773b31828d..a968fd7029 100644
--- a/core/os/mutex.h
+++ b/core/os/mutex.h
@@ -72,7 +72,7 @@ public:
template <typename MutexT>
class MutexLock {
- THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
+ mutable THREADING_NAMESPACE::unique_lock<typename MutexT::StdMutexType> lock;
public:
explicit MutexLock(const MutexT &p_mutex) :
@@ -82,8 +82,18 @@ public:
template <typename T = MutexT>
_ALWAYS_INLINE_ THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &_get_lock(
typename std::enable_if<std::is_same<T, THREADING_NAMESPACE::mutex>::value> * = nullptr) const {
- return const_cast<THREADING_NAMESPACE::unique_lock<THREADING_NAMESPACE::mutex> &>(lock);
+ return lock;
}
+
+ _ALWAYS_INLINE_ void temp_relock() const {
+ lock.lock();
+ }
+
+ _ALWAYS_INLINE_ void temp_unlock() const {
+ lock.unlock();
+ }
+
+ // TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
};
using Mutex = MutexImpl<THREADING_NAMESPACE::recursive_mutex>; // Recursive, for general use
@@ -109,6 +119,9 @@ template <typename MutexT>
class MutexLock {
public:
MutexLock(const MutexT &p_mutex) {}
+
+ void temp_relock() const {}
+ void temp_unlock() const {}
};
using Mutex = MutexImpl;
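[Editor's sketch] The mutex.h hunk is the core of the commit: MutexLock gains temp_unlock()/temp_relock() so callers can briefly release a scoped lock without destroying and rebuilding it. A minimal standalone wrapper over std::unique_lock showing the same shape (ScopedLock is a made-up name; Godot's real class also covers BinaryMutex and the no-threads build):

#include <mutex>

// RAII lock that can be temporarily released, mirroring MutexLock::temp_unlock()/temp_relock().
class ScopedLock {
	mutable std::unique_lock<std::mutex> lock;

public:
	explicit ScopedLock(std::mutex &m) :
			lock(m) {}

	void temp_unlock() const { lock.unlock(); }
	void temp_relock() const { lock.lock(); }
	// std::unique_lock tracks ownership, so the destructor only unlocks if currently held.
};

std::mutex m;
int shared_value = 0; // guarded by m

void example() {
	ScopedLock guard(m);
	shared_value++; // protected
	guard.temp_unlock();
	// ... slow work that must not hold the mutex ...
	guard.temp_relock();
	shared_value++; // protected again; released automatically at scope exit
}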
diff --git a/core/os/safe_binary_mutex.h b/core/os/safe_binary_mutex.h
index 1035ee76b4..74a20043a3 100644
--- a/core/os/safe_binary_mutex.h
+++ b/core/os/safe_binary_mutex.h
@@ -108,6 +108,16 @@ public:
~MutexLock() {
mutex.unlock();
}
+
+ _ALWAYS_INLINE_ void temp_relock() const {
+ mutex.lock();
+ }
+
+ _ALWAYS_INLINE_ void temp_unlock() const {
+ mutex.unlock();
+ }
+
+ // TODO: Implement a `try_temp_relock` if needed (will also need a dummy method below).
};
#ifdef __clang__
@@ -128,6 +138,16 @@ public:
void unlock() const {}
};
+template <int Tag>
+class MutexLock<SafeBinaryMutex<Tag>> {
+public:
+ MutexLock(const SafeBinaryMutex<Tag> &p_mutex) {}
+ ~MutexLock() {}
+
+ void temp_relock() const {}
+ void temp_unlock() const {}
+};
+
#endif // THREADS_ENABLED
#endif // SAFE_BINARY_MUTEX_H
diff --git a/core/string/node_path.cpp b/core/string/node_path.cpp
index 8ae2efb787..fdc72bc8dc 100644
--- a/core/string/node_path.cpp
+++ b/core/string/node_path.cpp
@@ -215,7 +215,10 @@ StringName NodePath::get_concatenated_names() const {
String concatenated;
const StringName *sn = data->path.ptr();
for (int i = 0; i < pc; i++) {
- concatenated += i == 0 ? sn[i].operator String() : "/" + sn[i];
+ if (i > 0) {
+ concatenated += "/";
+ }
+ concatenated += sn[i].operator String();
}
data->concatenated_path = concatenated;
}
@@ -230,7 +233,10 @@ StringName NodePath::get_concatenated_subnames() const {
String concatenated;
const StringName *ssn = data->subpath.ptr();
for (int i = 0; i < spc; i++) {
- concatenated += i == 0 ? ssn[i].operator String() : ":" + ssn[i];
+ if (i > 0) {
+ concatenated += ":";
+ }
+ concatenated += ssn[i].operator String();
}
data->concatenated_subpath = concatenated;
}
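[Editor's sketch] The node_path.cpp hunks replace the per-iteration ternary with an explicit "separator only between elements" loop, which is easier to read and avoids building a temporary String for every element after the first. The generic join pattern, reduced to std::string:

#include <string>
#include <vector>

// Join elements with a separator, adding the separator only between elements --
// the same shape the get_concatenated_names()/get_concatenated_subnames() loops now use.
std::string join(const std::vector<std::string> &parts, const std::string &sep) {
	std::string out;
	for (size_t i = 0; i < parts.size(); i++) {
		if (i > 0) {
			out += sep;
		}
		out += parts[i];
	}
	return out;
}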
diff --git a/core/templates/paged_allocator.h b/core/templates/paged_allocator.h
index 4854e1b866..0b70fa02f3 100644
--- a/core/templates/paged_allocator.h
+++ b/core/templates/paged_allocator.h
@@ -55,7 +55,7 @@ class PagedAllocator {
public:
template <typename... Args>
T *alloc(Args &&...p_args) {
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.lock();
}
if (unlikely(allocs_available == 0)) {
@@ -76,7 +76,7 @@ public:
allocs_available--;
T *alloc = available_pool[allocs_available >> page_shift][allocs_available & page_mask];
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.unlock();
}
memnew_placement(alloc, T(p_args...));
@@ -84,13 +84,13 @@ public:
}
void free(T *p_mem) {
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.lock();
}
p_mem->~T();
available_pool[allocs_available >> page_shift][allocs_available & page_mask] = p_mem;
allocs_available++;
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.unlock();
}
}
@@ -120,28 +120,28 @@ private:
public:
void reset(bool p_allow_unfreed = false) {
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.lock();
}
_reset(p_allow_unfreed);
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.unlock();
}
}
bool is_configured() const {
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.lock();
}
bool result = page_size > 0;
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.unlock();
}
return result;
}
void configure(uint32_t p_page_size) {
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.lock();
}
ERR_FAIL_COND(page_pool != nullptr); // Safety check.
@@ -149,7 +149,7 @@ public:
page_size = nearest_power_of_2_templated(p_page_size);
page_mask = page_size - 1;
page_shift = get_shift_from_power_of_2(page_size);
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.unlock();
}
}
@@ -161,7 +161,7 @@ public:
}
~PagedAllocator() {
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.lock();
}
bool leaked = allocs_available < pages_allocated * page_size;
@@ -172,7 +172,7 @@ public:
} else {
_reset(false);
}
- if (thread_safe) {
+ if constexpr (thread_safe) {
spin_lock.unlock();
}
}
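[Editor's sketch] The paged_allocator.h, rid_owner.h, and sort_array.h hunks all make the same substitution: plain `if (thread_safe)` / `if (Validate)` on a compile-time template parameter becomes `if constexpr`, so the discarded branch is dropped at compile time rather than left for the optimizer to prove dead. A reduced illustration (ToyAllocator and its std::mutex member are illustrative, not Godot's SpinLock-based types):

#include <mutex>
#include <vector>

template <typename T, bool thread_safe = false>
class ToyAllocator {
	std::vector<T *> pool;
	std::mutex lock; // only ever touched when thread_safe is true

public:
	T *alloc() {
		if constexpr (thread_safe) {
			lock.lock(); // not even instantiated when thread_safe == false
		}
		T *p = new T();
		pool.push_back(p);
		if constexpr (thread_safe) {
			lock.unlock();
		}
		return p;
	}

	~ToyAllocator() {
		for (T *p : pool) {
			delete p;
		}
	}
};

// Usage: ToyAllocator<int> single_threaded; ToyAllocator<int, true> shared;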
diff --git a/core/templates/rid_owner.h b/core/templates/rid_owner.h
index 86304d3c73..537413e2ba 100644
--- a/core/templates/rid_owner.h
+++ b/core/templates/rid_owner.h
@@ -82,7 +82,7 @@ class RID_Alloc : public RID_AllocBase {
mutable SpinLock spin_lock;
_FORCE_INLINE_ RID _allocate_rid() {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.lock();
}
@@ -128,7 +128,7 @@ class RID_Alloc : public RID_AllocBase {
alloc_count++;
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
@@ -156,14 +156,14 @@ public:
if (p_rid == RID()) {
return nullptr;
}
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.lock();
}
uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
if (unlikely(idx >= max_alloc)) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
return nullptr;
@@ -176,14 +176,14 @@ public:
if (unlikely(p_initialize)) {
if (unlikely(!(validator_chunks[idx_chunk][idx_element] & 0x80000000))) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
ERR_FAIL_V_MSG(nullptr, "Initializing already initialized RID");
}
if (unlikely((validator_chunks[idx_chunk][idx_element] & 0x7FFFFFFF) != validator)) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
ERR_FAIL_V_MSG(nullptr, "Attempting to initialize the wrong RID");
@@ -192,7 +192,7 @@ public:
validator_chunks[idx_chunk][idx_element] &= 0x7FFFFFFF; //initialized
} else if (unlikely(validator_chunks[idx_chunk][idx_element] != validator)) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
if ((validator_chunks[idx_chunk][idx_element] & 0x80000000) && validator_chunks[idx_chunk][idx_element] != 0xFFFFFFFF) {
@@ -203,7 +203,7 @@ public:
T *ptr = &chunks[idx_chunk][idx_element];
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
@@ -221,14 +221,14 @@ public:
}
_FORCE_INLINE_ bool owns(const RID &p_rid) const {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.lock();
}
uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
if (unlikely(idx >= max_alloc)) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
return false;
@@ -241,7 +241,7 @@ public:
bool owned = (validator != 0x7FFFFFFF) && (validator_chunks[idx_chunk][idx_element] & 0x7FFFFFFF) == validator;
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
@@ -249,14 +249,14 @@ public:
}
_FORCE_INLINE_ void free(const RID &p_rid) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.lock();
}
uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
if (unlikely(idx >= max_alloc)) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
ERR_FAIL();
@@ -267,12 +267,12 @@ public:
uint32_t validator = uint32_t(id >> 32);
if (unlikely(validator_chunks[idx_chunk][idx_element] & 0x80000000)) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
ERR_FAIL_MSG("Attempted to free an uninitialized or invalid RID.");
} else if (unlikely(validator_chunks[idx_chunk][idx_element] != validator)) {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
ERR_FAIL();
@@ -284,7 +284,7 @@ public:
alloc_count--;
free_list_chunks[alloc_count / elements_in_chunk][alloc_count % elements_in_chunk] = idx;
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
}
@@ -293,7 +293,7 @@ public:
return alloc_count;
}
void get_owned_list(List<RID> *p_owned) const {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.lock();
}
for (size_t i = 0; i < max_alloc; i++) {
@@ -302,14 +302,14 @@ public:
p_owned->push_back(_make_from_id((validator << 32) | i));
}
}
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
}
//used for fast iteration in the elements or RIDs
void fill_owned_buffer(RID *p_rid_buffer) const {
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.lock();
}
uint32_t idx = 0;
@@ -320,7 +320,7 @@ public:
idx++;
}
}
- if (THREAD_SAFE) {
+ if constexpr (THREAD_SAFE) {
spin_lock.unlock();
}
}
diff --git a/core/templates/sort_array.h b/core/templates/sort_array.h
index e7eaf8ee81..5bf5b2819d 100644
--- a/core/templates/sort_array.h
+++ b/core/templates/sort_array.h
@@ -174,14 +174,14 @@ public:
while (true) {
while (compare(p_array[p_first], p_pivot)) {
- if (Validate) {
+ if constexpr (Validate) {
ERR_BAD_COMPARE(p_first == unmodified_last - 1);
}
p_first++;
}
p_last--;
while (compare(p_pivot, p_array[p_last])) {
- if (Validate) {
+ if constexpr (Validate) {
ERR_BAD_COMPARE(p_last == unmodified_first);
}
p_last--;
@@ -251,7 +251,7 @@ public:
inline void unguarded_linear_insert(int64_t p_last, T p_value, T *p_array) const {
int64_t next = p_last - 1;
while (compare(p_value, p_array[next])) {
- if (Validate) {
+ if constexpr (Validate) {
ERR_BAD_COMPARE(next == 0);
}
p_array[p_last] = p_array[next];