Diffstat (limited to 'core/object')
-rw-r--r--   core/object/callable_method_pointer.h       24
-rw-r--r--   core/object/class_db.cpp                     29
-rw-r--r--   core/object/class_db.h                       21
-rw-r--r--   core/object/make_virtuals.py                236
-rw-r--r--   core/object/message_queue.cpp                20
-rw-r--r--   core/object/object.cpp                       17
-rw-r--r--   core/object/object.h                          4
-rw-r--r--   core/object/ref_counted.h                     5
-rw-r--r--   core/object/script_language.cpp              18
-rw-r--r--   core/object/script_language.h                 6
-rw-r--r--   core/object/script_language_extension.cpp     3
-rw-r--r--   core/object/script_language_extension.h       4
-rw-r--r--   core/object/undo_redo.cpp                    20
-rw-r--r--   core/object/undo_redo.h                       4
-rw-r--r--   core/object/worker_thread_pool.cpp          533
-rw-r--r--   core/object/worker_thread_pool.h             66
16 files changed, 576 insertions(+), 434 deletions(-)
diff --git a/core/object/callable_method_pointer.h b/core/object/callable_method_pointer.h
index db78b982e4..f8e8c4d7e9 100644
--- a/core/object/callable_method_pointer.h
+++ b/core/object/callable_method_pointer.h
@@ -81,35 +81,27 @@ template <class T, class... P>
 class CallableCustomMethodPointer : public CallableCustomMethodPointerBase {
     struct Data {
         T *instance;
-#ifdef DEBUG_ENABLED
         uint64_t object_id;
-#endif
         void (T::*method)(P...);
     } data;

 public:
     virtual ObjectID get_object() const {
-#ifdef DEBUG_ENABLED
         if (ObjectDB::get_instance(ObjectID(data.object_id)) == nullptr) {
             return ObjectID();
         }
-#endif
         return data.instance->get_instance_id();
     }

     virtual void call(const Variant **p_arguments, int p_argcount, Variant &r_return_value, Callable::CallError &r_call_error) const {
-#ifdef DEBUG_ENABLED
         ERR_FAIL_NULL_MSG(ObjectDB::get_instance(ObjectID(data.object_id)), "Invalid Object id '" + uitos(data.object_id) + "', can't call method.");
-#endif
         call_with_variant_args(data.instance, data.method, p_arguments, p_argcount, r_call_error);
     }

     CallableCustomMethodPointer(T *p_instance, void (T::*p_method)(P...)) {
         memset(&data, 0, sizeof(Data)); // Clear beforehand, may have padding bytes.
         data.instance = p_instance;
-#ifdef DEBUG_ENABLED
         data.object_id = p_instance->get_instance_id();
-#endif
         data.method = p_method;
         _setup((uint32_t *)&data, sizeof(Data));
     }
@@ -135,36 +127,28 @@ template <class T, class R, class... P>
 class CallableCustomMethodPointerRet : public CallableCustomMethodPointerBase {
     struct Data {
         T *instance;
-#ifdef DEBUG_ENABLED
         uint64_t object_id;
-#endif
         R(T::*method)
         (P...);
     } data;

 public:
     virtual ObjectID get_object() const {
-#ifdef DEBUG_ENABLED
         if (ObjectDB::get_instance(ObjectID(data.object_id)) == nullptr) {
             return ObjectID();
         }
-#endif
         return data.instance->get_instance_id();
     }

     virtual void call(const Variant **p_arguments, int p_argcount, Variant &r_return_value, Callable::CallError &r_call_error) const {
-#ifdef DEBUG_ENABLED
         ERR_FAIL_NULL_MSG(ObjectDB::get_instance(ObjectID(data.object_id)), "Invalid Object id '" + uitos(data.object_id) + "', can't call method.");
-#endif
         call_with_variant_args_ret(data.instance, data.method, p_arguments, p_argcount, r_return_value, r_call_error);
     }

     CallableCustomMethodPointerRet(T *p_instance, R (T::*p_method)(P...)) {
         memset(&data, 0, sizeof(Data)); // Clear beforehand, may have padding bytes.
         data.instance = p_instance;
-#ifdef DEBUG_ENABLED
         data.object_id = p_instance->get_instance_id();
-#endif
         data.method = p_method;
         _setup((uint32_t *)&data, sizeof(Data));
     }
@@ -190,36 +174,28 @@ template <class T, class R, class... P>
 class CallableCustomMethodPointerRetC : public CallableCustomMethodPointerBase {
     struct Data {
         T *instance;
-#ifdef DEBUG_ENABLED
         uint64_t object_id;
-#endif
         R(T::*method)
         (P...) const;
     } data;

 public:
     virtual ObjectID get_object() const override {
-#ifdef DEBUG_ENABLED
         if (ObjectDB::get_instance(ObjectID(data.object_id)) == nullptr) {
             return ObjectID();
         }
-#endif
         return data.instance->get_instance_id();
     }

     virtual void call(const Variant **p_arguments, int p_argcount, Variant &r_return_value, Callable::CallError &r_call_error) const override {
-#ifdef DEBUG_ENABLED
         ERR_FAIL_NULL_MSG(ObjectDB::get_instance(ObjectID(data.object_id)), "Invalid Object id '" + uitos(data.object_id) + "', can't call method.");
-#endif
         call_with_variant_args_retc(data.instance, data.method, p_arguments, p_argcount, r_return_value, r_call_error);
     }

     CallableCustomMethodPointerRetC(T *p_instance, R (T::*p_method)(P...) const) {
         memset(&data, 0, sizeof(Data)); // Clear beforehand, may have padding bytes.
         data.instance = p_instance;
-#ifdef DEBUG_ENABLED
         data.object_id = p_instance->get_instance_id();
-#endif
         data.method = p_method;
         _setup((uint32_t *)&data, sizeof(Data));
     }
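
Note: the hunks above compile the cached object_id liveness check unconditionally instead of only under DEBUG_ENABLED, so method-pointer callables targeting freed objects now fail gracefully in release builds too. A minimal usage sketch of what this protects (illustrative only, not part of the patch):

    Node *node = memnew(Node);
    Callable c = callable_mp(node, &Node::queue_free); // Backed by CallableCustomMethodPointer.
    memdelete(node);
    // The cached object_id no longer resolves in ObjectDB, so instead of
    // dereferencing a dangling instance pointer, the call now errors out cleanly
    // even in release templates.
    Callable::CallError ce;
    Variant ret;
    c.callp(nullptr, 0, ret, ce);
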
diff --git a/core/object/class_db.cpp b/core/object/class_db.cpp
index bf1bd0de93..f2a9a68d08 100644
--- a/core/object/class_db.cpp
+++ b/core/object/class_db.cpp
@@ -31,6 +31,7 @@
 #include "class_db.h"

 #include "core/config/engine.h"
+#include "core/core_string_names.h"
 #include "core/io/resource_loader.h"
 #include "core/object/script_language.h"
 #include "core/os/mutex.h"
@@ -1299,6 +1300,12 @@ bool ClassDB::get_property(Object *p_object, const StringName &p_property, Varia
         check = check->inherits_ptr;
     }

+    // The "free()" method is special, so we assume it exists and return a Callable.
+    if (p_property == CoreStringNames::get_singleton()->_free) {
+        r_value = Callable(p_object, p_property);
+        return true;
+    }
+
     return false;
 }

@@ -1608,6 +1615,28 @@ void ClassDB::get_virtual_methods(const StringName &p_class, List<MethodInfo> *p
 #endif
 }

+void ClassDB::add_extension_class_virtual_method(const StringName &p_class, const GDExtensionClassVirtualMethodInfo *p_method_info) {
+    ERR_FAIL_COND_MSG(!classes.has(p_class), "Request for nonexistent class '" + p_class + "'.");
+
+#ifdef DEBUG_METHODS_ENABLED
+    PackedStringArray arg_names;
+
+    MethodInfo mi;
+    mi.name = *reinterpret_cast<StringName *>(p_method_info->name);
+    mi.return_val = PropertyInfo(p_method_info->return_value);
+    mi.return_val_metadata = p_method_info->return_value_metadata;
+    mi.flags = p_method_info->method_flags;
+    for (int i = 0; i < (int)p_method_info->argument_count; i++) {
+        PropertyInfo arg(p_method_info->arguments[i]);
+        mi.arguments.push_back(arg);
+        mi.arguments_metadata.push_back(p_method_info->arguments_metadata[i]);
+        arg_names.push_back(arg.name);
+    }
+
+    add_virtual_method(p_class, mi, true, arg_names);
+#endif
+}
+
 void ClassDB::set_class_enabled(const StringName &p_class, bool p_enable) {
     OBJTYPE_WLOCK;
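
Note: with the "free()" special case above, looking up the reserved free name through the property path now yields a bound Callable instead of failing, which is what lets scripts treat obj.free as a first-class value. A sketch of the effect through the same entry point (illustrative):

    Object *obj = memnew(Object);
    Variant value;
    bool found = ClassDB::get_property(obj, CoreStringNames::get_singleton()->_free, value);
    // found == true; value now holds Callable(obj, "free"), usable later like
    // any other method-backed Callable.
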
diff --git a/core/object/class_db.h b/core/object/class_db.h
index 7a4ee1afa4..c910b30d11 100644
--- a/core/object/class_db.h
+++ b/core/object/class_db.h
@@ -188,7 +188,7 @@ public:
     template <class T>
     static void register_class(bool p_virtual = false) {
         GLOBAL_LOCK_FUNCTION;
-        static_assert(TypesAreSame<typename T::self_type, T>::value, "Class not declared properly, please use GDCLASS.");
+        static_assert(types_are_same_v<typename T::self_type, T>, "Class not declared properly, please use GDCLASS.");
         T::initialize_class();
         ClassInfo *t = classes.getptr(T::get_class_static());
         ERR_FAIL_NULL(t);
@@ -203,7 +203,7 @@ public:
     template <class T>
     static void register_abstract_class() {
         GLOBAL_LOCK_FUNCTION;
-        static_assert(TypesAreSame<typename T::self_type, T>::value, "Class not declared properly, please use GDCLASS.");
+        static_assert(types_are_same_v<typename T::self_type, T>, "Class not declared properly, please use GDCLASS.");
         T::initialize_class();
         ClassInfo *t = classes.getptr(T::get_class_static());
         ERR_FAIL_NULL(t);
@@ -216,7 +216,7 @@ public:
     template <class T>
     static void register_internal_class() {
         GLOBAL_LOCK_FUNCTION;
-        static_assert(TypesAreSame<typename T::self_type, T>::value, "Class not declared properly, please use GDCLASS.");
+        static_assert(types_are_same_v<typename T::self_type, T>, "Class not declared properly, please use GDCLASS.");
         T::initialize_class();
         ClassInfo *t = classes.getptr(T::get_class_static());
         ERR_FAIL_NULL(t);
@@ -239,7 +239,7 @@ public:
     template <class T>
     static void register_custom_instance_class() {
         GLOBAL_LOCK_FUNCTION;
-        static_assert(TypesAreSame<typename T::self_type, T>::value, "Class not declared properly, please use GDCLASS.");
+        static_assert(types_are_same_v<typename T::self_type, T>, "Class not declared properly, please use GDCLASS.");
         T::initialize_class();
         ClassInfo *t = classes.getptr(T::get_class_static());
         ERR_FAIL_NULL(t);
@@ -296,7 +296,7 @@ public:
             argptrs[i] = &args[i];
         }
         MethodBind *bind = create_method_bind(p_method);
-        if constexpr (std::is_same<typename member_function_traits<M>::return_type, Object *>::value) {
+        if constexpr (std::is_same_v<typename member_function_traits<M>::return_type, Object *>) {
             bind->set_return_type_is_raw_object_ptr(true);
         }
         return bind_methodfi(METHOD_FLAGS_DEFAULT, bind, false, p_method_name, sizeof...(p_args) == 0 ? nullptr : (const Variant **)argptrs, sizeof...(p_args));
@@ -311,7 +311,7 @@ public:
         }
         MethodBind *bind = create_static_method_bind(p_method);
         bind->set_instance_class(p_class);
-        if constexpr (std::is_same<typename member_function_traits<M>::return_type, Object *>::value) {
+        if constexpr (std::is_same_v<typename member_function_traits<M>::return_type, Object *>) {
             bind->set_return_type_is_raw_object_ptr(true);
         }
         return bind_methodfi(METHOD_FLAGS_DEFAULT, bind, false, p_method_name, sizeof...(p_args) == 0 ? nullptr : (const Variant **)argptrs, sizeof...(p_args));
@@ -325,7 +325,7 @@ public:
             argptrs[i] = &args[i];
         }
         MethodBind *bind = create_method_bind(p_method);
-        if constexpr (std::is_same<typename member_function_traits<M>::return_type, Object *>::value) {
+        if constexpr (std::is_same_v<typename member_function_traits<M>::return_type, Object *>) {
             bind->set_return_type_is_raw_object_ptr(true);
         }
         return bind_methodfi(METHOD_FLAGS_DEFAULT, bind, true, p_method_name, sizeof...(p_args) == 0 ? nullptr : (const Variant **)argptrs, sizeof...(p_args));
@@ -340,7 +340,7 @@ public:
         }
         MethodBind *bind = create_static_method_bind(p_method);
         bind->set_instance_class(p_class);
-        if constexpr (std::is_same<typename member_function_traits<M>::return_type, Object *>::value) {
+        if constexpr (std::is_same_v<typename member_function_traits<M>::return_type, Object *>) {
             bind->set_return_type_is_raw_object_ptr(true);
         }
         return bind_methodfi(METHOD_FLAGS_DEFAULT, bind, true, p_method_name, sizeof...(p_args) == 0 ? nullptr : (const Variant **)argptrs, sizeof...(p_args));
@@ -353,7 +353,7 @@ public:

         MethodBind *bind = create_vararg_method_bind(p_method, p_info, p_return_nil_is_variant);
         ERR_FAIL_NULL_V(bind, nullptr);
-        if constexpr (std::is_same<typename member_function_traits<M>::return_type, Object *>::value) {
+        if constexpr (std::is_same_v<typename member_function_traits<M>::return_type, Object *>) {
             bind->set_return_type_is_raw_object_ptr(true);
         }
         return _bind_vararg_method(bind, p_name, p_default_args, false);
@@ -366,7 +366,7 @@ public:

         MethodBind *bind = create_vararg_method_bind(p_method, p_info, p_return_nil_is_variant);
         ERR_FAIL_NULL_V(bind, nullptr);
-        if constexpr (std::is_same<typename member_function_traits<M>::return_type, Object *>::value) {
+        if constexpr (std::is_same_v<typename member_function_traits<M>::return_type, Object *>) {
             bind->set_return_type_is_raw_object_ptr(true);
         }
         return _bind_vararg_method(bind, p_name, p_default_args, true);
@@ -410,6 +410,7 @@ public:

     static void add_virtual_method(const StringName &p_class, const MethodInfo &p_method, bool p_virtual = true, const Vector<String> &p_arg_names = Vector<String>(), bool p_object_core = false);
     static void get_virtual_methods(const StringName &p_class, List<MethodInfo> *p_methods, bool p_no_inheritance = false);
+    static void add_extension_class_virtual_method(const StringName &p_class, const GDExtensionClassVirtualMethodInfo *p_method_info);

     static void bind_integer_constant(const StringName &p_class, const StringName &p_enum, const StringName &p_name, int64_t p_constant, bool p_is_bitfield = false);
     static void get_integer_constant_list(const StringName &p_class, List<String> *p_constants, bool p_no_inheritance = false);
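
Note: types_are_same_v is the variable-template shorthand for the old TypesAreSame<...>::value trait, mirroring the std::is_same to std::is_same_v cleanups in the same file. What the static_assert guards against is registering a class that forgot GDCLASS, since GDCLASS is what redefines self_type (sketch; Good and Bad are hypothetical):

    class Good : public Object {
        GDCLASS(Good, Object); // Defines Good::self_type = Good.
    };

    class Bad : public Object {
        // No GDCLASS: Bad::self_type is inherited as Object, so
        // ClassDB::register_class<Bad>() now fails at compile time with
        // "Class not declared properly, please use GDCLASS."
    };
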
diff --git a/core/object/make_virtuals.py b/core/object/make_virtuals.py
index 79a8df6c8a..ae70981f72 100644
--- a/core/object/make_virtuals.py
+++ b/core/object/make_virtuals.py
@@ -1,78 +1,74 @@
-proto = """
-#define GDVIRTUAL$VER($RET m_name $ARG) \\
-StringName _gdvirtual_##m_name##_sn = #m_name;\\
-mutable bool _gdvirtual_##m_name##_initialized = false;\\
-mutable void* _gdvirtual_##m_name = nullptr;\\
-template<bool required>\\
-_FORCE_INLINE_ bool _gdvirtual_##m_name##_call($CALLARGS) $CONST { \\
-    ScriptInstance *_script_instance = ((Object*)(this))->get_script_instance();\\
-    if (_script_instance) {\\
-        Callable::CallError ce; \\
-        $CALLSIARGS\\
-        $CALLSIBEGIN_script_instance->callp(_gdvirtual_##m_name##_sn, $CALLSIARGPASS, ce);\\
-        if (ce.error == Callable::CallError::CALL_OK) {\\
-            $CALLSIRET\\
+proto = """#define GDVIRTUAL$VER($RET m_name $ARG)\\
+    StringName _gdvirtual_##m_name##_sn = #m_name;\\
+    mutable bool _gdvirtual_##m_name##_initialized = false;\\
+    mutable void *_gdvirtual_##m_name = nullptr;\\
+    template <bool required>\\
+    _FORCE_INLINE_ bool _gdvirtual_##m_name##_call($CALLARGS) $CONST {\\
+        ScriptInstance *_script_instance = ((Object *)(this))->get_script_instance();\\
+        if (_script_instance) {\\
+            Callable::CallError ce;\\
+            $CALLSIARGS\\
+            $CALLSIBEGIN_script_instance->callp(_gdvirtual_##m_name##_sn, $CALLSIARGPASS, ce);\\
+            if (ce.error == Callable::CallError::CALL_OK) {\\
+                $CALLSIRET\\
+                return true;\\
+            }\\
+        }\\
+        if (unlikely(_get_extension() && !_gdvirtual_##m_name##_initialized)) {\\
+            _gdvirtual_##m_name = nullptr;\\
+            if (_get_extension()->get_virtual_call_data && _get_extension()->call_virtual_with_data) {\\
+                _gdvirtual_##m_name = _get_extension()->get_virtual_call_data(_get_extension()->class_userdata, &_gdvirtual_##m_name##_sn);\\
+            } else if (_get_extension()->get_virtual) {\\
+                _gdvirtual_##m_name = (void *)_get_extension()->get_virtual(_get_extension()->class_userdata, &_gdvirtual_##m_name##_sn);\\
+            }\\
+            GDVIRTUAL_TRACK(_gdvirtual_##m_name, _gdvirtual_##m_name##_initialized);\\
+            _gdvirtual_##m_name##_initialized = true;\\
+        }\\
+        if (_gdvirtual_##m_name) {\\
+            $CALLPTRARGS\\
+            $CALLPTRRETDEF\\
+            if (_get_extension()->get_virtual_call_data && _get_extension()->call_virtual_with_data) {\\
+                _get_extension()->call_virtual_with_data(_get_extension_instance(), &_gdvirtual_##m_name##_sn, _gdvirtual_##m_name, $CALLPTRARGPASS, $CALLPTRRETPASS);\\
+                $CALLPTRRET\\
+            } else {\\
+                ((GDExtensionClassCallVirtual)_gdvirtual_##m_name)(_get_extension_instance(), $CALLPTRARGPASS, $CALLPTRRETPASS);\\
+                $CALLPTRRET\\
+            }\\
             return true;\\
-        } \\
+        }\\
+        if (required) {\\
+            ERR_PRINT_ONCE("Required virtual method " + get_class() + "::" + #m_name + " must be overridden before calling.");\\
+            $RVOID\\
+        }\\
+        return false;\\
     }\\
-    if (unlikely(_get_extension() && !_gdvirtual_##m_name##_initialized)) {\\
-        _gdvirtual_##m_name = nullptr;\\
-        if (_get_extension()->get_virtual_call_data && _get_extension()->call_virtual_with_data) {\\
-            _gdvirtual_##m_name = _get_extension()->get_virtual_call_data(_get_extension()->class_userdata, &_gdvirtual_##m_name##_sn);\\
-        } else if (_get_extension()->get_virtual) {\\
-            _gdvirtual_##m_name = (void *)_get_extension()->get_virtual(_get_extension()->class_userdata, &_gdvirtual_##m_name##_sn);\\
-        }\\
-        GDVIRTUAL_TRACK(_gdvirtual_##m_name, _gdvirtual_##m_name##_initialized); \\
-        _gdvirtual_##m_name##_initialized = true;\\
-    }\\
-    if (_gdvirtual_##m_name) {\\
-        $CALLPTRARGS\\
-        $CALLPTRRETDEF\\
-        if (_get_extension()->get_virtual_call_data && _get_extension()->call_virtual_with_data) {\\
-            _get_extension()->call_virtual_with_data(_get_extension_instance(), &_gdvirtual_##m_name##_sn, _gdvirtual_##m_name, $CALLPTRARGPASS,$CALLPTRRETPASS);\\
-            $CALLPTRRET\\
-        } else {\\
-            ((GDExtensionClassCallVirtual)_gdvirtual_##m_name)(_get_extension_instance(),$CALLPTRARGPASS,$CALLPTRRETPASS);\\
-            $CALLPTRRET\\
-        }\\
-        return true;\\
-    }\\
-    \\
-    if (required) {\\
-        ERR_PRINT_ONCE("Required virtual method " + get_class() + "::" + #m_name + " must be overridden before calling.");\\
-        $RVOID\\
-    }\\
-\\
-    return false;\\
-}\\
-_FORCE_INLINE_ bool _gdvirtual_##m_name##_overridden() const { \\
-    ScriptInstance *_script_instance = ((Object*)(this))->get_script_instance();\\
-    if (_script_instance && _script_instance->has_method(_gdvirtual_##m_name##_sn)) {\\
-        return true;\\
-    }\\
+    _FORCE_INLINE_ bool _gdvirtual_##m_name##_overridden() const {\\
+        ScriptInstance *_script_instance = ((Object *)(this))->get_script_instance();\\
+        if (_script_instance && _script_instance->has_method(_gdvirtual_##m_name##_sn)) {\\
+            return true;\\
+        }\\
+        if (unlikely(_get_extension() && !_gdvirtual_##m_name##_initialized)) {\\
+            _gdvirtual_##m_name = nullptr;\\
+            if (_get_extension()->get_virtual_call_data && _get_extension()->call_virtual_with_data) {\\
+                _gdvirtual_##m_name = _get_extension()->get_virtual_call_data(_get_extension()->class_userdata, &_gdvirtual_##m_name##_sn);\\
+            } else if (_get_extension()->get_virtual) {\\
+                _gdvirtual_##m_name = (void *)_get_extension()->get_virtual(_get_extension()->class_userdata, &_gdvirtual_##m_name##_sn);\\
+            }\\
+            GDVIRTUAL_TRACK(_gdvirtual_##m_name, _gdvirtual_##m_name##_initialized);\\
+            _gdvirtual_##m_name##_initialized = true;\\
+        }\\
+        if (_gdvirtual_##m_name) {\\
+            return true;\\
+        }\\
+        return false;\\
     }\\
-    return false;\\
-}\\
-\\
-_FORCE_INLINE_ static MethodInfo _gdvirtual_##m_name##_get_method_info() { \\
-    MethodInfo method_info;\\
-    method_info.name = #m_name;\\
-    method_info.flags = METHOD_FLAG_VIRTUAL;\\
-    $FILL_METHOD_INFO\\
-    return method_info;\\
-}
+    _FORCE_INLINE_ static MethodInfo _gdvirtual_##m_name##_get_method_info() {\\
+        MethodInfo method_info;\\
+        method_info.name = #m_name;\\
+        method_info.flags = $METHOD_FLAGS;\\
+        $FILL_METHOD_INFO\\
+        return method_info;\\
+    }
 """
@@ -83,22 +79,23 @@ def generate_version(argcount, const=False, returns=False):
     method_info = ""
     if returns:
         sproto += "R"
-        s = s.replace("$RET", "m_ret, ")
+        s = s.replace("$RET", "m_ret,")
         s = s.replace("$RVOID", "(void)r_ret;")  # If required, may lead to uninitialized errors
         s = s.replace("$CALLPTRRETDEF", "PtrToArg<m_ret>::EncodeT ret;")
-        method_info += "\tmethod_info.return_val = GetTypeInfo<m_ret>::get_class_info();\\\n"
-        method_info += "\tmethod_info.return_val_metadata = GetTypeInfo<m_ret>::METADATA;\\\n"
+        method_info += "method_info.return_val = GetTypeInfo<m_ret>::get_class_info();\\\n"
+        method_info += "\t\tmethod_info.return_val_metadata = GetTypeInfo<m_ret>::METADATA;"
     else:
-        s = s.replace("$RET", "")
-        s = s.replace("$RVOID", "")
-        s = s.replace("$CALLPTRRETDEF", "")
+        s = s.replace("$RET ", "")
+        s = s.replace("\t\t\t$RVOID\\\n", "")
+        s = s.replace("\t\t\t$CALLPTRRETDEF\\\n", "")

     if const:
         sproto += "C"
         s = s.replace("$CONST", "const")
-        method_info += "\tmethod_info.flags|=METHOD_FLAG_CONST;\\\n"
+        s = s.replace("$METHOD_FLAGS", "METHOD_FLAG_VIRTUAL | METHOD_FLAG_CONST")
     else:
-        s = s.replace("$CONST", "")
+        s = s.replace("$CONST ", "")
+        s = s.replace("$METHOD_FLAGS", "METHOD_FLAG_VIRTUAL")

     s = s.replace("$VER", sproto)
     argtext = ""
@@ -108,9 +105,9 @@ def generate_version(argcount, const=False, returns=False):
     callptrargsptr = ""
     if argcount > 0:
         argtext += ", "
-        callsiargs = "Variant vargs[" + str(argcount) + "]={"
-        callsiargptrs = "\t\tconst Variant *vargptrs[" + str(argcount) + "]={"
-        callptrargsptr = "\t\tGDExtensionConstTypePtr argptrs[" + str(argcount) + "]={"
+        callsiargs = f"Variant vargs[{argcount}] = {{ "
+        callsiargptrs = f"\t\t\tconst Variant *vargptrs[{argcount}] = {{ "
+        callptrargsptr = f"\t\t\tGDExtensionConstTypePtr argptrs[{argcount}] = {{ "
         callptrargs = ""
         for i in range(argcount):
             if i > 0:
@@ -118,52 +115,55 @@ def generate_version(argcount, const=False, returns=False):
                 callargtext += ", "
                 callsiargs += ", "
                 callsiargptrs += ", "
-                callptrargs += "\t\t"
+                callptrargs += "\t\t\t"
                 callptrargsptr += ", "
-            argtext += "m_type" + str(i + 1)
-            callargtext += "m_type" + str(i + 1) + " arg" + str(i + 1)
-            callsiargs += "Variant(arg" + str(i + 1) + ")"
-            callsiargptrs += "&vargs[" + str(i) + "]"
+            argtext += f"m_type{i + 1}"
+            callargtext += f"m_type{i + 1} arg{i + 1}"
+            callsiargs += f"Variant(arg{i + 1})"
+            callsiargptrs += f"&vargs[{i}]"
             callptrargs += (
-                "PtrToArg<m_type" + str(i + 1) + ">::EncodeT argval" + str(i + 1) + " = arg" + str(i + 1) + ";\\\n"
-            )
-            callptrargsptr += "&argval" + str(i + 1)
-            method_info += "\tmethod_info.arguments.push_back(GetTypeInfo<m_type" + str(i + 1) + ">::get_class_info());\\\n"
-            method_info += (
-                "\tmethod_info.arguments_metadata.push_back(GetTypeInfo<m_type" + str(i + 1) + ">::METADATA);\\\n"
+                f"PtrToArg<m_type{i + 1}>::EncodeT argval{i + 1} = (PtrToArg<m_type{i + 1}>::EncodeT)arg{i + 1};\\\n"
             )
+            callptrargsptr += f"&argval{i + 1}"
+            if method_info:
+                method_info += "\\\n\t\t"
+            method_info += f"method_info.arguments.push_back(GetTypeInfo<m_type{i + 1}>::get_class_info());\\\n"
+            method_info += f"\t\tmethod_info.arguments_metadata.push_back(GetTypeInfo<m_type{i + 1}>::METADATA);"

     if argcount:
-        callsiargs += "};\\\n"
-        callsiargptrs += "};\\\n"
+        callsiargs += " };\\\n"
+        callsiargptrs += " };"
         s = s.replace("$CALLSIARGS", callsiargs + callsiargptrs)
-        s = s.replace("$CALLSIARGPASS", "(const Variant **)vargptrs," + str(argcount))
-        callptrargsptr += "};\\\n"
+        s = s.replace("$CALLSIARGPASS", f"(const Variant **)vargptrs, {argcount}")
+        callptrargsptr += " };"
         s = s.replace("$CALLPTRARGS", callptrargs + callptrargsptr)
-        s = s.replace("$CALLPTRARGPASS", "reinterpret_cast<GDExtensionConstTypePtr*>(argptrs)")
+        s = s.replace("$CALLPTRARGPASS", "reinterpret_cast<GDExtensionConstTypePtr *>(argptrs)")
     else:
-        s = s.replace("$CALLSIARGS", "")
+        s = s.replace("\t\t\t$CALLSIARGS\\\n", "")
         s = s.replace("$CALLSIARGPASS", "nullptr, 0")
-        s = s.replace("$CALLPTRARGS", "")
+        s = s.replace("\t\t\t$CALLPTRARGS\\\n", "")
         s = s.replace("$CALLPTRARGPASS", "nullptr")

     if returns:
         if argcount > 0:
-            callargtext += ","
-        callargtext += " m_ret& r_ret"
+            callargtext += ", "
+        callargtext += "m_ret &r_ret"
         s = s.replace("$CALLSIBEGIN", "Variant ret = ")
         s = s.replace("$CALLSIRET", "r_ret = VariantCaster<m_ret>::cast(ret);")
         s = s.replace("$CALLPTRRETPASS", "&ret")
         s = s.replace("$CALLPTRRET", "r_ret = (m_ret)ret;")
     else:
         s = s.replace("$CALLSIBEGIN", "")
-        s = s.replace("$CALLSIRET", "")
+        s = s.replace("\t\t\t\t$CALLSIRET\\\n", "")
         s = s.replace("$CALLPTRRETPASS", "nullptr")
-        s = s.replace("$CALLPTRRET", "")
+        s = s.replace("\t\t\t\t$CALLPTRRET\\\n", "")

-    s = s.replace("$ARG", argtext)
+    s = s.replace(" $ARG", argtext)
     s = s.replace("$CALLARGS", callargtext)
-    s = s.replace("$FILL_METHOD_INFO", method_info)
+    if method_info:
+        s = s.replace("$FILL_METHOD_INFO", method_info)
+    else:
+        s = s.replace("\t\t$FILL_METHOD_INFO\\\n", method_info)

     return s
@@ -171,21 +171,21 @@ def generate_version(argcount, const=False, returns=False):
 def run(target, source, env):
     max_versions = 12

-    txt = """
+    txt = """/* THIS FILE IS GENERATED DO NOT EDIT */
 #ifndef GDVIRTUAL_GEN_H
 #define GDVIRTUAL_GEN_H

 #include "core/object/script_instance.h"

 #ifdef TOOLS_ENABLED
-#define GDVIRTUAL_TRACK(m_virtual, m_initialized) \\
-    if (_get_extension()->reloadable) {\\
-        VirtualMethodTracker *tracker = memnew(VirtualMethodTracker);\\
-        tracker->method = (void **)&m_virtual;\\
-        tracker->initialized = &m_initialized;\\
-        tracker->next = virtual_method_list;\\
-        virtual_method_list = tracker;\\
-    }
+#define GDVIRTUAL_TRACK(m_virtual, m_initialized)\\
+    if (_get_extension()->reloadable) {\\
+        VirtualMethodTracker *tracker = memnew(VirtualMethodTracker);\\
+        tracker->method = (void **)&m_virtual;\\
+        tracker->initialized = &m_initialized;\\
+        tracker->next = virtual_method_list;\\
+        virtual_method_list = tracker;\\
+    }
 #else
 #define GDVIRTUAL_TRACK(m_virtual, m_initialized)
 #endif
@@ -193,13 +193,13 @@ def run(target, source, env):
 """

     for i in range(max_versions + 1):
-        txt += "/* " + str(i) + " Arguments */\n\n"
+        txt += f"/* {i} Arguments */\n\n"
         txt += generate_version(i, False, False)
         txt += generate_version(i, False, True)
         txt += generate_version(i, True, False)
         txt += generate_version(i, True, True)

-    txt += "#endif"
+    txt += "#endif // GDVIRTUAL_GEN_H\n"

     with open(target[0], "w") as f:
         f.write(txt)
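
Note: make_virtuals.py generates the GDVIRTUAL* macro family consumed by engine classes; the rewrite above mostly re-indents the generated code and folds the const flag into a single $METHOD_FLAGS substitution. For orientation, this is roughly how the generated macros are used (sketch; MyNode and _compute are hypothetical):

    class MyNode : public Node {
        GDCLASS(MyNode, Node);

    protected:
        GDVIRTUAL1R(int, _compute, float) // From the generated proto: 1 arg, returns int.
        static void _bind_methods() {
            GDVIRTUAL_BIND(_compute, "value");
        }

    public:
        int compute(float p_value) {
            int ret = 0;
            if (GDVIRTUAL_CALL(_compute, p_value, ret)) {
                return ret; // A script or extension overrode it.
            }
            return 0; // Fallback when not overridden.
        }
    };
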
%s\n", itos(p_notification).utf8().get_data(), itos(p_id).utf8().get_data(), error_text.utf8().get_data()); statistics(); UNLOCK_MUTEX; return ERR_OUT_OF_MEMORY; diff --git a/core/object/object.cpp b/core/object/object.cpp index 40df13849b..cc33d0ab8a 100644 --- a/core/object/object.cpp +++ b/core/object/object.cpp @@ -728,7 +728,7 @@ Variant Object::callp(const StringName &p_method, const Variant **p_args, int p_ r_error.expected = 0; return Variant(); } - if (Object::cast_to<RefCounted>(this)) { + if (is_ref_counted()) { r_error.error = Callable::CallError::CALL_ERROR_INVALID_METHOD; ERR_FAIL_V_MSG(Variant(), "Can't 'free' a reference."); } @@ -1347,12 +1347,10 @@ Error Object::connect(const StringName &p_signal, const Callable &p_callable, ui s = &signal_map[p_signal]; } - Callable target = p_callable; - //compare with the base callable, so binds can be ignored - if (s->slot_map.has(*target.get_base_comparator())) { + if (s->slot_map.has(*p_callable.get_base_comparator())) { if (p_flags & CONNECT_REFERENCE_COUNTED) { - s->slot_map[*target.get_base_comparator()].reference_count++; + s->slot_map[*p_callable.get_base_comparator()].reference_count++; return OK; } else { ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Signal '" + p_signal + "' is already connected to given callable '" + p_callable + "' in that object."); @@ -1364,7 +1362,7 @@ Error Object::connect(const StringName &p_signal, const Callable &p_callable, ui SignalData::Slot slot; Connection conn; - conn.callable = target; + conn.callable = p_callable; conn.signal = ::Signal(this, p_signal); conn.flags = p_flags; slot.conn = conn; @@ -1376,7 +1374,7 @@ Error Object::connect(const StringName &p_signal, const Callable &p_callable, ui } //use callable version as key, so binds can be ignored - s->slot_map[*target.get_base_comparator()] = slot; + s->slot_map[*p_callable.get_base_comparator()] = slot; return OK; } @@ -1397,9 +1395,7 @@ bool Object::is_connected(const StringName &p_signal, const Callable &p_callable ERR_FAIL_V_MSG(false, "Nonexistent signal: " + p_signal + "."); } - Callable target = p_callable; - - return s->slot_map.has(*target.get_base_comparator()); + return s->slot_map.has(*p_callable.get_base_comparator()); } void Object::disconnect(const StringName &p_signal, const Callable &p_callable) { @@ -1687,6 +1683,7 @@ void Object::_bind_methods() { BIND_CONSTANT(NOTIFICATION_POSTINITIALIZE); BIND_CONSTANT(NOTIFICATION_PREDELETE); + BIND_CONSTANT(NOTIFICATION_EXTENSION_RELOADED); BIND_ENUM_CONSTANT(CONNECT_DEFERRED); BIND_ENUM_CONSTANT(CONNECT_PERSIST); diff --git a/core/object/object.h b/core/object/object.h index 7b53fcaa41..d697f14b7e 100644 --- a/core/object/object.h +++ b/core/object/object.h @@ -235,7 +235,7 @@ struct MethodInfo { return arguments_metadata.size() > p_arg ? arguments_metadata[p_arg] : 0; } - inline bool operator==(const MethodInfo &p_method) const { return id == p_method.id; } + inline bool operator==(const MethodInfo &p_method) const { return id == p_method.id && name == p_method.name; } inline bool operator<(const MethodInfo &p_method) const { return id == p_method.id ? 
diff --git a/core/object/object.h b/core/object/object.h
index 7b53fcaa41..d697f14b7e 100644
--- a/core/object/object.h
+++ b/core/object/object.h
@@ -235,7 +235,7 @@ struct MethodInfo {
         return arguments_metadata.size() > p_arg ? arguments_metadata[p_arg] : 0;
     }

-    inline bool operator==(const MethodInfo &p_method) const { return id == p_method.id; }
+    inline bool operator==(const MethodInfo &p_method) const { return id == p_method.id && name == p_method.name; }
     inline bool operator<(const MethodInfo &p_method) const { return id == p_method.id ? (name < p_method.name) : (id < p_method.id); }

     operator Dictionary() const;
@@ -656,7 +656,7 @@ private:
     friend class RefCounted;
     bool type_is_reference = false;

-    std::mutex _instance_binding_mutex;
+    BinaryMutex _instance_binding_mutex;
     struct InstanceBinding {
         void *binding = nullptr;
         void *token = nullptr;
diff --git a/core/object/ref_counted.h b/core/object/ref_counted.h
index 228373d662..10be27b879 100644
--- a/core/object/ref_counted.h
+++ b/core/object/ref_counted.h
@@ -212,8 +212,9 @@ public:
         reference = nullptr;
     }

-    void instantiate() {
-        ref(memnew(T));
+    template <typename... VarArgs>
+    void instantiate(VarArgs... p_params) {
+        ref(memnew(T(p_params...)));
     }

     Ref() {}
diff --git a/core/object/script_language.cpp b/core/object/script_language.cpp
index 086f8a666e..3b9b1f9094 100644
--- a/core/object/script_language.cpp
+++ b/core/object/script_language.cpp
@@ -138,6 +138,8 @@ void Script::_bind_methods() {
     ClassDB::bind_method(D_METHOD("get_base_script"), &Script::get_base_script);
     ClassDB::bind_method(D_METHOD("get_instance_base_type"), &Script::get_instance_base_type);

+    ClassDB::bind_method(D_METHOD("get_global_name"), &Script::get_global_name);
+
     ClassDB::bind_method(D_METHOD("has_script_signal", "signal_name"), &Script::has_script_signal);

     ClassDB::bind_method(D_METHOD("get_script_property_list"), &Script::_get_script_property_list);
@@ -166,6 +168,18 @@ ScriptLanguage *ScriptServer::get_language(int p_idx) {
     return _languages[p_idx];
 }

+ScriptLanguage *ScriptServer::get_language_for_extension(const String &p_extension) {
+    MutexLock lock(languages_mutex);
+
+    for (int i = 0; i < _language_count; i++) {
+        if (_languages[i] && _languages[i]->get_extension() == p_extension) {
+            return _languages[i];
+        }
+    }
+
+    return nullptr;
+}
+
 Error ScriptServer::register_language(ScriptLanguage *p_language) {
     MutexLock lock(languages_mutex);
     ERR_FAIL_NULL_V(p_language, ERR_INVALID_PARAMETER);
@@ -631,6 +645,10 @@ bool PlaceHolderScriptInstance::has_method(const StringName &p_method) const {
 void PlaceHolderScriptInstance::update(const List<PropertyInfo> &p_properties, const HashMap<StringName, Variant> &p_values) {
     HashSet<StringName> new_values;
     for (const PropertyInfo &E : p_properties) {
+        if (E.usage & (PROPERTY_USAGE_GROUP | PROPERTY_USAGE_SUBGROUP | PROPERTY_USAGE_CATEGORY)) {
+            continue;
+        }
+
         StringName n = E.name;
         new_values.insert(n);
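
Note: Ref<T>::instantiate() above becomes variadic and forwards constructor arguments straight to memnew. Usage sketch (MyResource and its constructor are hypothetical):

    Ref<MyResource> res;
    res.instantiate("initial_name", 42); // Forwards to memnew(MyResource("initial_name", 42)).

    // The old zero-argument form keeps working unchanged:
    Ref<MyResource> other;
    other.instantiate();
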
diff --git a/core/object/script_language.h b/core/object/script_language.h
index 85e64c8d62..294231a3e7 100644
--- a/core/object/script_language.h
+++ b/core/object/script_language.h
@@ -75,6 +75,7 @@ public:
     static bool is_scripting_enabled();
     _FORCE_INLINE_ static int get_language_count() { return _language_count; }
     static ScriptLanguage *get_language(int p_idx);
+    static ScriptLanguage *get_language_for_extension(const String &p_extension);
     static Error register_language(ScriptLanguage *p_language);
     static Error unregister_language(const ScriptLanguage *p_language);

@@ -243,7 +244,7 @@ public:
     virtual void get_doc_comment_delimiters(List<String> *p_delimiters) const = 0;
     virtual void get_string_delimiters(List<String> *p_delimiters) const = 0;
     virtual Ref<Script> make_template(const String &p_template, const String &p_class_name, const String &p_base_class_name) const { return Ref<Script>(); }
-    virtual Vector<ScriptTemplate> get_built_in_templates(StringName p_object) { return Vector<ScriptTemplate>(); }
+    virtual Vector<ScriptTemplate> get_built_in_templates(const StringName &p_object) { return Vector<ScriptTemplate>(); }
     virtual bool is_using_templates() { return false; }
     virtual bool validate(const String &p_script, const String &p_path = "", List<String> *r_functions = nullptr, List<ScriptError> *r_errors = nullptr, List<Warning> *r_warnings = nullptr, HashSet<int> *r_safe_lines = nullptr) const = 0;
     virtual String validate_path(const String &p_path) const { return ""; }
@@ -371,6 +372,7 @@ public:
     virtual Vector<StackInfo> debug_get_current_stack_info() { return Vector<StackInfo>(); }

     virtual void reload_all_scripts() = 0;
+    virtual void reload_scripts(const Array &p_scripts, bool p_soft_reload) = 0;
     virtual void reload_tool_script(const Ref<Script> &p_script, bool p_soft_reload) = 0;
     /* LOADER FUNCTIONS */

@@ -384,10 +386,12 @@ public:
         uint64_t call_count;
         uint64_t total_time;
         uint64_t self_time;
+        uint64_t internal_time;
     };

     virtual void profiling_start() = 0;
     virtual void profiling_stop() = 0;
+    virtual void profiling_set_save_native_calls(bool p_enable) = 0;

     virtual int profiling_get_accumulated_data(ProfilingInfo *p_info_arr, int p_info_max) = 0;
     virtual int profiling_get_frame_data(ProfilingInfo *p_info_arr, int p_info_max) = 0;
diff --git a/core/object/script_language_extension.cpp b/core/object/script_language_extension.cpp
index e326baf7eb..be62cabe25 100644
--- a/core/object/script_language_extension.cpp
+++ b/core/object/script_language_extension.cpp
@@ -107,7 +107,7 @@ void ScriptLanguageExtension::_bind_methods() {
     GDVIRTUAL_BIND(_supports_builtin_mode);
     GDVIRTUAL_BIND(_supports_documentation);
     GDVIRTUAL_BIND(_can_inherit_from_file);
-    GDVIRTUAL_BIND(_find_function, "class_name", "function_name");
+    GDVIRTUAL_BIND(_find_function, "function", "code");
     GDVIRTUAL_BIND(_make_function, "class_name", "function_name", "function_args");
     GDVIRTUAL_BIND(_open_in_external_editor, "script", "line", "column");
     GDVIRTUAL_BIND(_overrides_external_editor);
@@ -145,6 +145,7 @@ void ScriptLanguageExtension::_bind_methods() {

     GDVIRTUAL_BIND(_profiling_start);
     GDVIRTUAL_BIND(_profiling_stop);
+    GDVIRTUAL_BIND(_profiling_set_save_native_calls, "enable");

     GDVIRTUAL_BIND(_profiling_get_accumulated_data, "info_array", "info_max");
     GDVIRTUAL_BIND(_profiling_get_frame_data, "info_array", "info_max");
diff --git a/core/object/script_language_extension.h b/core/object/script_language_extension.h
index 00ab1cd6c0..5b10739486 100644
--- a/core/object/script_language_extension.h
+++ b/core/object/script_language_extension.h
@@ -265,7 +265,7 @@ public:

     GDVIRTUAL1RC(TypedArray<Dictionary>, _get_built_in_templates, StringName)

-    virtual Vector<ScriptTemplate> get_built_in_templates(StringName p_object) override {
+    virtual Vector<ScriptTemplate> get_built_in_templates(const StringName &p_object) override {
         TypedArray<Dictionary> ret;
         GDVIRTUAL_REQUIRED_CALL(_get_built_in_templates, p_object, ret);
         Vector<ScriptTemplate> stret;
@@ -562,6 +562,7 @@ public:
     }

     EXBIND0(reload_all_scripts)
+    EXBIND2(reload_scripts, const Array &, bool)
     EXBIND2(reload_tool_script, const Ref<Script> &, bool)

     /* LOADER FUNCTIONS */
@@ -607,6 +608,7 @@ public:

     EXBIND0(profiling_start)
     EXBIND0(profiling_stop)
+    EXBIND1(profiling_set_save_native_calls, bool)

     GDVIRTUAL2R(int, _profiling_get_accumulated_data, GDExtensionPtr<ScriptLanguageExtensionProfilingInfo>, int)
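
Note: the new reload_scripts and profiling_set_save_native_calls entries ride on the EXBIND* helper macros, which forward a virtual method to the matching underscore-prefixed GDVIRTUAL. Roughly what EXBIND1(profiling_set_save_native_calls, bool) expands to (simplified sketch; the real macro also declares the GDVIRTUAL plumbing):

    virtual void profiling_set_save_native_calls(bool p_enable) override {
        GDVIRTUAL_REQUIRED_CALL(_profiling_set_save_native_calls, p_enable);
    }
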
diff --git a/core/object/undo_redo.cpp b/core/object/undo_redo.cpp
index 3c1d4ef95e..569e6ae19f 100644
--- a/core/object/undo_redo.cpp
+++ b/core/object/undo_redo.cpp
@@ -316,6 +316,14 @@ void UndoRedo::commit_action(bool p_execute) {
     _redo(p_execute); // perform action
     committing--;

+    if (max_steps > 0) {
+        // Clear early steps.
+
+        while (actions.size() > max_steps) {
+            _pop_history_tail();
+        }
+    }
+
     if (add_message && callback && actions.size() > 0) {
         callback(callback_ud, actions[actions.size() - 1].name);
     }
@@ -473,6 +481,14 @@ uint64_t UndoRedo::get_version() const {
     return version;
 }

+void UndoRedo::set_max_steps(int p_max_steps) {
+    max_steps = p_max_steps;
+}
+
+int UndoRedo::get_max_steps() const {
+    return max_steps;
+}
+
 void UndoRedo::set_commit_notify_callback(CommitNotifyCallback p_callback, void *p_ud) {
     callback = p_callback;
     callback_ud = p_ud;
@@ -517,9 +533,13 @@ void UndoRedo::_bind_methods() {
     ClassDB::bind_method(D_METHOD("has_undo"), &UndoRedo::has_undo);
     ClassDB::bind_method(D_METHOD("has_redo"), &UndoRedo::has_redo);
     ClassDB::bind_method(D_METHOD("get_version"), &UndoRedo::get_version);
+    ClassDB::bind_method(D_METHOD("set_max_steps", "max_steps"), &UndoRedo::set_max_steps);
+    ClassDB::bind_method(D_METHOD("get_max_steps"), &UndoRedo::get_max_steps);
     ClassDB::bind_method(D_METHOD("redo"), &UndoRedo::redo);
     ClassDB::bind_method(D_METHOD("undo"), &UndoRedo::undo);

+    ADD_PROPERTY(PropertyInfo(Variant::INT, "max_steps", PROPERTY_HINT_RANGE, "0,50,1,or_greater"), "set_max_steps", "get_max_steps");
+
     ADD_SIGNAL(MethodInfo("version_changed"));

     BIND_ENUM_CONSTANT(MERGE_DISABLE);
diff --git a/core/object/undo_redo.h b/core/object/undo_redo.h
index b3a3322e4b..62aebff877 100644
--- a/core/object/undo_redo.h
+++ b/core/object/undo_redo.h
@@ -80,6 +80,7 @@ private:
     int current_action = -1;
     bool force_keep_in_merge_ends = false;
    int action_level = 0;
+    int max_steps = 0;
     MergeMode merge_mode = MERGE_DISABLE;
     bool merging = false;
     uint64_t version = 1;
@@ -135,6 +136,9 @@ public:

     uint64_t get_version() const;

+    void set_max_steps(int p_max_steps);
+    int get_max_steps() const;
+
     void set_commit_notify_callback(CommitNotifyCallback p_callback, void *p_ud);

     void set_method_notify_callback(MethodNotifyCallback p_method_callback, void *p_ud);
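
Note: max_steps caps the undo history; commit_action() above trims the oldest entries via _pop_history_tail() once the cap is exceeded, and the default of 0 keeps history unlimited. Usage sketch:

    UndoRedo *ur = memnew(UndoRedo);
    ur->set_max_steps(50); // Keep at most 50 committed actions.
    // After the 51st commit_action(), the oldest action is dropped,
    // exactly as the new loop in commit_action() shows.
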
set_current_thread_safe_for_nodes(false); - pool_thread_index = thread_ids[Thread::get_caller_id()]; - ThreadData &curr_thread = threads[pool_thread_index]; // Since the WorkerThreadPool is started before the script server, // its pre-created threads can't have ScriptServer::thread_enter() called on them early. // Therefore, we do it late at the first opportunity, so in case the task @@ -71,15 +66,11 @@ void WorkerThreadPool::_process_task(Task *p_task) { } task_mutex.lock(); p_task->pool_thread_index = pool_thread_index; - if (low_priority) { - low_priority_tasks_running++; - prev_low_prio_task = curr_thread.current_low_prio_task; - curr_thread.current_low_prio_task = p_task; - } else { - curr_thread.current_low_prio_task = nullptr; - } + prev_task = curr_thread.current_task; + curr_thread.current_task = p_task; task_mutex.unlock(); } +#endif if (p_task->group) { // Handling a group @@ -111,33 +102,24 @@ void WorkerThreadPool::_process_task(Task *p_task) { memdelete(p_task->template_userdata); // This is no longer needed at this point, so get rid of it. } - if (low_priority && use_native_low_priority_threads) { - p_task->completed = true; - p_task->done_semaphore.post(); - if (do_post) { - p_task->group->completed.set_to(true); - } - } else { - if (do_post) { - p_task->group->done_semaphore.post(); - p_task->group->completed.set_to(true); - } - uint32_t max_users = p_task->group->tasks_used + 1; // Add 1 because the thread waiting for it is also user. Read before to avoid another thread freeing task after increment. - uint32_t finished_users = p_task->group->finished.increment(); - - if (finished_users == max_users) { - // Get rid of the group, because nobody else is using it. - task_mutex.lock(); - group_allocator.free(p_task->group); - task_mutex.unlock(); - } - - // For groups, tasks get rid of themselves. + if (do_post) { + p_task->group->done_semaphore.post(); + p_task->group->completed.set_to(true); + } + uint32_t max_users = p_task->group->tasks_used + 1; // Add 1 because the thread waiting for it is also user. Read before to avoid another thread freeing task after increment. + uint32_t finished_users = p_task->group->finished.increment(); + if (finished_users == max_users) { + // Get rid of the group, because nobody else is using it. task_mutex.lock(); - task_allocator.free(p_task); + group_allocator.free(p_task->group); task_mutex.unlock(); } + + // For groups, tasks get rid of themselves. + + task_mutex.lock(); + task_allocator.free(p_task); } else { if (p_task->native_func) { p_task->native_func(p_task->native_func_userdata); @@ -150,88 +132,164 @@ void WorkerThreadPool::_process_task(Task *p_task) { task_mutex.lock(); p_task->completed = true; - for (uint8_t i = 0; i < p_task->waiting; i++) { - p_task->done_semaphore.post(); + p_task->pool_thread_index = -1; + if (p_task->waiting_user) { + p_task->done_semaphore.post(p_task->waiting_user); } - if (!use_native_low_priority_threads) { - p_task->pool_thread_index = -1; + // Let awaiters know. + for (uint32_t i = 0; i < threads.size(); i++) { + if (threads[i].awaited_task == p_task) { + threads[i].cond_var.notify_one(); + threads[i].signaled = true; + } } - task_mutex.unlock(); // Keep mutex down to here since on unlock the task may be freed. } - // Task may have been freed by now (all callers notified). 
- p_task = nullptr; - - if (!use_native_low_priority_threads) { - bool post = false; - task_mutex.lock(); - ThreadData &curr_thread = threads[pool_thread_index]; - curr_thread.current_low_prio_task = prev_low_prio_task; - if (low_priority) { +#ifdef THREADS_ENABLED + { + curr_thread.current_task = prev_task; + if (p_task->low_priority) { low_priority_threads_used--; - low_priority_tasks_running--; - // A low prioriry task was freed, so see if we can move a pending one to the high priority queue. - if (_try_promote_low_priority_task()) { - post = true; - } - if (low_priority_tasks_awaiting_others == low_priority_tasks_running) { - _prevent_low_prio_saturation_deadlock(); + if (_try_promote_low_priority_task()) { + if (prev_task) { // Otherwise, this thread will catch it. + _notify_threads(&curr_thread, 1, 0); + } } } + task_mutex.unlock(); - if (post) { - task_available_semaphore.post(); - } } + + set_current_thread_safe_for_nodes(safe_for_nodes_backup); +#endif } void WorkerThreadPool::_thread_function(void *p_user) { + ThreadData *thread_data = (ThreadData *)p_user; while (true) { - singleton->task_available_semaphore.wait(); - if (singleton->exit_threads) { - break; + Task *task_to_process = nullptr; + { + MutexLock lock(singleton->task_mutex); + if (singleton->exit_threads) { + return; + } + thread_data->signaled = false; + + if (singleton->task_queue.first()) { + task_to_process = singleton->task_queue.first()->self(); + singleton->task_queue.remove(singleton->task_queue.first()); + } else { + thread_data->cond_var.wait(lock); + DEV_ASSERT(singleton->exit_threads || thread_data->signaled); + } } - singleton->_process_task_queue(); - } -} -void WorkerThreadPool::_native_low_priority_thread_function(void *p_user) { - Task *task = (Task *)p_user; - singleton->_process_task(task); + if (task_to_process) { + singleton->_process_task(task_to_process); + } + } } -void WorkerThreadPool::_post_task(Task *p_task, bool p_high_priority) { +void WorkerThreadPool::_post_tasks_and_unlock(Task **p_tasks, uint32_t p_count, bool p_high_priority) { // Fall back to processing on the calling thread if there are no worker threads. // Separated into its own variable to make it easier to extend this logic // in custom builds. bool process_on_calling_thread = threads.size() == 0; if (process_on_calling_thread) { - _process_task(p_task); + task_mutex.unlock(); + for (uint32_t i = 0; i < p_count; i++) { + _process_task(p_tasks[i]); + } return; } - task_mutex.lock(); - p_task->low_priority = !p_high_priority; - if (!p_high_priority && use_native_low_priority_threads) { - p_task->low_priority_thread = native_thread_allocator.alloc(); - task_mutex.unlock(); + uint32_t to_process = 0; + uint32_t to_promote = 0; - if (p_task->group) { - p_task->group->low_priority_native_tasks.push_back(p_task); + ThreadData *caller_pool_thread = thread_ids.has(Thread::get_caller_id()) ? &threads[thread_ids[Thread::get_caller_id()]] : nullptr; + + for (uint32_t i = 0; i < p_count; i++) { + p_tasks[i]->low_priority = !p_high_priority; + if (p_high_priority || low_priority_threads_used < max_low_priority_threads) { + task_queue.add_last(&p_tasks[i]->task_elem); + if (!p_high_priority) { + low_priority_threads_used++; + } + to_process++; + } else { + // Too many threads using low priority, must go to queue. + low_priority_task_queue.add_last(&p_tasks[i]->task_elem); + to_promote++; } - p_task->low_priority_thread->start(_native_low_priority_thread_function, p_task); // Pask task directly to thread. 
- } else if (p_high_priority || low_priority_threads_used < max_low_priority_threads) { - task_queue.add_last(&p_task->task_elem); - if (!p_high_priority) { - low_priority_threads_used++; + } + + _notify_threads(caller_pool_thread, to_process, to_promote); + + task_mutex.unlock(); +} + +void WorkerThreadPool::_notify_threads(const ThreadData *p_current_thread_data, uint32_t p_process_count, uint32_t p_promote_count) { + uint32_t to_process = p_process_count; + uint32_t to_promote = p_promote_count; + + // This is where which threads are awaken is decided according to the workload. + // Threads that will anyway have a chance to check the situation and process/promote tasks + // are excluded from being notified. Others will be tried anyway to try to distribute load. + // The current thread, if is a pool thread, is also excluded depending on the promoting/processing + // needs because it will anyway loop again. However, it will contribute to decreasing the count, + // which helps reducing sync traffic. + + uint32_t thread_count = threads.size(); + + // First round: + // 1. For processing: notify threads that are not running tasks, to keep the stacks as shallow as possible. + // 2. For promoting: since it's exclusive with processing, we fin threads able to promote low-prio tasks now. + for (uint32_t i = 0; + i < thread_count && (to_process || to_promote); + i++, notify_index = (notify_index + 1) % thread_count) { + ThreadData &th = threads[notify_index]; + + if (th.signaled) { + continue; + } + if (th.current_task) { + // Good thread for promoting low-prio? + if (to_promote && th.awaited_task && th.current_task->low_priority) { + if (likely(&th != p_current_thread_data)) { + th.cond_var.notify_one(); + } + th.signaled = true; + to_promote--; + } + } else { + if (to_process) { + if (likely(&th != p_current_thread_data)) { + th.cond_var.notify_one(); + } + th.signaled = true; + to_process--; + } + } + } + + // Second round: + // For processing: if the first round wasn't enough, let's try now with threads processing tasks but currently awaiting. + for (uint32_t i = 0; + i < thread_count && to_process; + i++, notify_index = (notify_index + 1) % thread_count) { + ThreadData &th = threads[notify_index]; + + if (th.signaled) { + continue; + } + if (th.awaited_task) { + if (likely(&th != p_current_thread_data)) { + th.cond_var.notify_one(); + } + th.signaled = true; + to_process--; } - task_mutex.unlock(); - task_available_semaphore.post(); - } else { - // Too many threads using low priority, must go to queue. - low_priority_task_queue.add_last(&p_task->task_elem); - task_mutex.unlock(); } } @@ -247,23 +305,6 @@ bool WorkerThreadPool::_try_promote_low_priority_task() { } } -void WorkerThreadPool::_prevent_low_prio_saturation_deadlock() { - if (low_priority_tasks_awaiting_others == low_priority_tasks_running) { -#ifdef DEV_ENABLED - print_verbose("WorkerThreadPool: Low-prio slots saturated with tasks all waiting for other low-prio tasks. Attempting to avoid deadlock by scheduling one extra task."); -#endif - // In order not to create dependency cycles, we can only schedule the next one. 
- // We'll keep doing the same until the deadlock is broken, - SelfList<Task> *to_promote = low_priority_task_queue.first(); - if (to_promote) { - low_priority_task_queue.remove(to_promote); - task_queue.add_last(to_promote); - low_priority_threads_used++; - task_available_semaphore.post(); - } - } -} - WorkerThreadPool::TaskID WorkerThreadPool::add_native_task(void (*p_func)(void *), void *p_userdata, bool p_high_priority, const String &p_description) { return _add_task(Callable(), p_func, p_userdata, nullptr, p_high_priority, p_description); } @@ -273,15 +314,15 @@ WorkerThreadPool::TaskID WorkerThreadPool::_add_task(const Callable &p_callable, // Get a free task Task *task = task_allocator.alloc(); TaskID id = last_task++; + task->self = id; task->callable = p_callable; task->native_func = p_func; task->native_func_userdata = p_userdata; task->description = p_description; task->template_userdata = p_template_userdata; tasks.insert(id, task); - task_mutex.unlock(); - _post_task(task, p_high_priority); + _post_tasks_and_unlock(&task, 1, p_high_priority); return id; } @@ -313,105 +354,117 @@ Error WorkerThreadPool::wait_for_task_completion(TaskID p_task_id) { } Task *task = *taskp; - if (!task->completed) { - if (!use_native_low_priority_threads && task->pool_thread_index != -1) { // Otherwise, it's not running yet. - int caller_pool_th_index = thread_ids.has(Thread::get_caller_id()) ? thread_ids[Thread::get_caller_id()] : -1; - if (caller_pool_th_index == task->pool_thread_index) { - // Deadlock prevention. - // Waiting for a task run on this same thread? That means the task to be awaited started waiting as well - // and another task was run to make use of the thread in the meantime, with enough bad luck as to - // the need to wait for the original task arose in turn. - // In other words, the task we want to wait for is buried in the stack. - // Let's report the caller about the issue to it handles as it sees fit. - task_mutex.unlock(); - return ERR_BUSY; - } + if (task->completed) { + if (task->waiting_pool == 0 && task->waiting_user == 0) { + tasks.erase(p_task_id); + task_allocator.free(task); } + task_mutex.unlock(); + return OK; + } + + ThreadData *caller_pool_thread = thread_ids.has(Thread::get_caller_id()) ? &threads[thread_ids[Thread::get_caller_id()]] : nullptr; + if (caller_pool_thread && p_task_id <= caller_pool_thread->current_task->self) { + // Deadlock prevention: + // When a pool thread wants to wait for an older task, the following situations can happen: + // 1. Awaited task is deep in the stack of the awaiter. + // 2. A group of awaiter threads end up depending on some tasks buried in the stack + // of their worker threads in such a way that progress can't be made. + // Both would entail a deadlock. Some may be handled here in the WorkerThreadPool + // with some extra logic and bookkeeping. However, there would still be unavoidable + // cases of deadlock because of the way waiting threads process outstanding tasks. + // Taking into account there's no feasible solution for every possible case + // with the current design, we just simply reject attempts to await on older tasks, + // with a specific error code that signals the situation so the caller can handle it. 
+ task_mutex.unlock(); + return ERR_BUSY; + } + + if (caller_pool_thread) { + task->waiting_pool++; + } else { + task->waiting_user++; + } + + task_mutex.unlock(); + + if (caller_pool_thread) { + while (true) { + Task *task_to_process = nullptr; + { + MutexLock lock(task_mutex); + bool was_signaled = caller_pool_thread->signaled; + caller_pool_thread->signaled = false; + + if (task->completed) { + // This thread was awaken also for some reason, but it's about to exit. + // Let's find out what may be pending and forward the requests. + if (!exit_threads && was_signaled) { + uint32_t to_process = task_queue.first() ? 1 : 0; + uint32_t to_promote = caller_pool_thread->current_task->low_priority && low_priority_task_queue.first() ? 1 : 0; + if (to_process || to_promote) { + // This thread must be left alone since it won't loop again. + caller_pool_thread->signaled = true; + _notify_threads(caller_pool_thread, to_process, to_promote); + } + } - task->waiting++; - - bool is_low_prio_waiting_for_another = false; - if (!use_native_low_priority_threads) { - // Deadlock prevention: - // If all low-prio tasks are waiting for other low-prio tasks and there are no more free low-prio slots, - // we have a no progressable situation. We can apply a workaround, consisting in promoting an awaited queued - // low-prio task to the schedule queue so it can run and break the "impasse". - // NOTE: A similar reasoning could be made about high priority tasks, but there are usually much more - // than low-prio. Therefore, a deadlock there would only happen when dealing with a very complex task graph - // or when there are too few worker threads (limited platforms or exotic settings). If that turns out to be - // an issue in the real world, a further fix can be applied against that. - if (task->low_priority) { - bool awaiter_is_a_low_prio_task = thread_ids.has(Thread::get_caller_id()) && threads[thread_ids[Thread::get_caller_id()]].current_low_prio_task; - if (awaiter_is_a_low_prio_task) { - is_low_prio_waiting_for_another = true; - low_priority_tasks_awaiting_others++; - if (low_priority_tasks_awaiting_others == low_priority_tasks_running) { - _prevent_low_prio_saturation_deadlock(); + task->waiting_pool--; + if (task->waiting_pool == 0 && task->waiting_user == 0) { + tasks.erase(p_task_id); + task_allocator.free(task); } + + break; } - } - } - task_mutex.unlock(); + if (!exit_threads) { + // This is a thread from the pool. It shouldn't just idle. + // Let's try to process other tasks while we wait. - if (use_native_low_priority_threads && task->low_priority) { - task->done_semaphore.wait(); - } else { - bool current_is_pool_thread = thread_ids.has(Thread::get_caller_id()); - if (current_is_pool_thread) { - // We are an actual process thread, we must not be blocked so continue processing stuff if available. - bool must_exit = false; - while (true) { - if (task->done_semaphore.try_wait()) { - // If done, exit - break; + if (caller_pool_thread->current_task->low_priority && low_priority_task_queue.first()) { + if (_try_promote_low_priority_task()) { + _notify_threads(caller_pool_thread, 1, 0); + } } - if (!must_exit) { - if (task_available_semaphore.try_wait()) { - if (exit_threads) { - must_exit = true; - } else { - // Solve tasks while they are around. 
- bool safe_for_nodes_backup = is_current_thread_safe_for_nodes(); - _process_task_queue(); - set_current_thread_safe_for_nodes(safe_for_nodes_backup); - continue; - } - } else if (!use_native_low_priority_threads && task->low_priority) { - // A low prioriry task started waiting, so see if we can move a pending one to the high priority queue. - task_mutex.lock(); - bool post = _try_promote_low_priority_task(); - task_mutex.unlock(); - if (post) { - task_available_semaphore.post(); - } + + if (singleton->task_queue.first()) { + task_to_process = task_queue.first()->self(); + task_queue.remove(task_queue.first()); + } + + if (!task_to_process) { + caller_pool_thread->awaited_task = task; + + if (flushing_cmd_queue) { + flushing_cmd_queue->unlock(); + } + caller_pool_thread->cond_var.wait(lock); + if (flushing_cmd_queue) { + flushing_cmd_queue->lock(); } + + DEV_ASSERT(exit_threads || caller_pool_thread->signaled || task->completed); + caller_pool_thread->awaited_task = nullptr; } - OS::get_singleton()->delay_usec(1); // Microsleep, this could be converted to waiting for multiple objects in supported platforms for a bit more performance. } - } else { - task->done_semaphore.wait(); } - } - task_mutex.lock(); - if (is_low_prio_waiting_for_another) { - low_priority_tasks_awaiting_others--; + if (task_to_process) { + _process_task(task_to_process); + } } - - task->waiting--; - } - - if (task->waiting == 0) { - if (use_native_low_priority_threads && task->low_priority) { - task->low_priority_thread->wait_to_finish(); - native_thread_allocator.free(task->low_priority_thread); + } else { + task->done_semaphore.wait(); + task_mutex.lock(); + task->waiting_user--; + if (task->waiting_pool == 0 && task->waiting_user == 0) { + tasks.erase(p_task_id); + task_allocator.free(task); } - tasks.erase(p_task_id); - task_allocator.free(task); + task_mutex.unlock(); } - task_mutex.unlock(); return OK; } @@ -455,11 +508,8 @@ WorkerThreadPool::GroupID WorkerThreadPool::_add_group_task(const Callable &p_ca } groups[id] = group; - task_mutex.unlock(); - for (int i = 0; i < p_tasks; i++) { - _post_task(tasks_posted[i], p_high_priority); - } + _post_tasks_and_unlock(tasks_posted, p_tasks, p_high_priority); return id; } @@ -496,28 +546,24 @@ bool WorkerThreadPool::is_group_task_completed(GroupID p_group) const { } void WorkerThreadPool::wait_for_group_task_completion(GroupID p_group) { +#ifdef THREADS_ENABLED task_mutex.lock(); Group **groupp = groups.getptr(p_group); task_mutex.unlock(); if (!groupp) { ERR_FAIL_MSG("Invalid Group ID"); } - Group *group = *groupp; - if (group->low_priority_native_tasks.size() > 0) { - for (Task *task : group->low_priority_native_tasks) { - task->low_priority_thread->wait_to_finish(); - task_mutex.lock(); - native_thread_allocator.free(task->low_priority_thread); - task_allocator.free(task); - task_mutex.unlock(); - } + { + Group *group = *groupp; - task_mutex.lock(); - group_allocator.free(group); - task_mutex.unlock(); - } else { + if (flushing_cmd_queue) { + flushing_cmd_queue->unlock(); + } group->done_semaphore.wait(); + if (flushing_cmd_queue) { + flushing_cmd_queue->lock(); + } uint32_t max_users = group->tasks_used + 1; // Add 1 because the thread waiting for it is also user. Read before to avoid another thread freeing task after increment. uint32_t finished_users = group->finished.increment(); // fetch happens before inc, so increment later. 
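The hunk above is the heart of the rework: a pool thread that has to wait no longer spins on semaphores with a microsleep; it drains the task queue itself and only parks on its per-thread condition variable when nothing is runnable. A condensed, standalone sketch of that strategy (std:: types and invented names, not the engine code; whoever completes the awaited task must set the flag under the mutex and notify the condition variable):

    #include <condition_variable>
    #include <deque>
    #include <functional>
    #include <mutex>

    struct MiniPool {
        std::mutex m;
        std::condition_variable cv;
        std::deque<std::function<void()>> queue;

        void wait_for(const bool *completed) {
            std::unique_lock<std::mutex> lock(m);
            while (!*completed) {
                if (!queue.empty()) {
                    std::function<void()> job = std::move(queue.front());
                    queue.pop_front();
                    lock.unlock();
                    job(); // Help with other tasks instead of idling.
                    lock.lock();
                } else {
                    cv.wait(lock); // Park; the loop absorbs spurious wakeups.
                }
            }
        }
    };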
@@ -533,21 +579,31 @@ void WorkerThreadPool::wait_for_group_task_completion(GroupID p_group) {
 task_mutex.lock(); // This mutex is needed when Physics 2D and/or 3D is selected to run on a separate thread.
 groups.erase(p_group);
 task_mutex.unlock();
+#endif
+}
+
+int WorkerThreadPool::get_thread_index() {
+ Thread::ID tid = Thread::get_caller_id();
+ return singleton->thread_ids.has(tid) ? singleton->thread_ids[tid] : -1;
 }
 
-void WorkerThreadPool::init(int p_thread_count, bool p_use_native_threads_low_priority, float p_low_priority_task_ratio) {
+void WorkerThreadPool::thread_enter_command_queue_mt_flush(CommandQueueMT *p_queue) {
+ ERR_FAIL_COND(flushing_cmd_queue != nullptr);
+ flushing_cmd_queue = p_queue;
+}
+
+void WorkerThreadPool::thread_exit_command_queue_mt_flush() {
+ ERR_FAIL_NULL(flushing_cmd_queue);
+ flushing_cmd_queue = nullptr;
+}
+
+void WorkerThreadPool::init(int p_thread_count, float p_low_priority_task_ratio) {
 ERR_FAIL_COND(threads.size() > 0);
 if (p_thread_count < 0) {
 p_thread_count = OS::get_singleton()->get_default_thread_pool_size();
 }
 
- if (p_use_native_threads_low_priority) {
- max_low_priority_threads = 0;
- } else {
- max_low_priority_threads = CLAMP(p_thread_count * p_low_priority_task_ratio, 1, p_thread_count - 1);
- }
-
- use_native_low_priority_threads = p_use_native_threads_low_priority;
+ max_low_priority_threads = CLAMP(p_thread_count * p_low_priority_task_ratio, 1, p_thread_count - 1);
 
 threads.resize(p_thread_count);
 
@@ -563,24 +619,33 @@ void WorkerThreadPool::finish() {
 return;
 }
 
- task_mutex.lock();
- SelfList<Task> *E = low_priority_task_queue.first();
- while (E) {
- print_error("Task waiting was never re-claimed: " + E->self()->description);
- E = E->next();
+ {
+ MutexLock lock(task_mutex);
+ SelfList<Task> *E = low_priority_task_queue.first();
+ while (E) {
+ print_error("Task waiting was never re-claimed: " + E->self()->description);
+ E = E->next();
+ }
 }
- task_mutex.unlock();
 
- exit_threads = true;
-
- for (uint32_t i = 0; i < threads.size(); i++) {
- task_available_semaphore.post();
+ {
+ MutexLock lock(task_mutex);
+ exit_threads = true;
+ }
+ for (ThreadData &data : threads) {
+ data.cond_var.notify_one();
 }
-
 for (ThreadData &data : threads) {
 data.thread.wait_to_finish();
 }
+
+ {
+ MutexLock lock(task_mutex);
+ for (KeyValue<TaskID, Task *> &E : tasks) {
+ task_allocator.free(E.value);
+ }
+ }
+
 threads.clear();
}
diff --git a/core/object/worker_thread_pool.h b/core/object/worker_thread_pool.h
index f323a979f7..c9921c808d 100644
--- a/core/object/worker_thread_pool.h
+++ b/core/object/worker_thread_pool.h
@@ -31,6 +31,7 @@
 #ifndef WORKER_THREAD_POOL_H
 #define WORKER_THREAD_POOL_H
 
+#include "core/os/condition_variable.h"
 #include "core/os/memory.h"
 #include "core/os/os.h"
 #include "core/os/semaphore.h"
@@ -40,6 +41,8 @@
 #include "core/templates/rid.h"
 #include "core/templates/safe_refcount.h"
 
+class CommandQueueMT;
+
 class WorkerThreadPool : public Object {
 GDCLASS(WorkerThreadPool, Object)
 public:
@@ -60,7 +63,7 @@ private:
 };
 
 struct Group {
- GroupID self;
+ GroupID self = -1;
 SafeNumeric<uint32_t> index;
 SafeNumeric<uint32_t> completed_index;
 uint32_t max = 0;
@@ -68,23 +71,23 @@ private:
 SafeFlag completed;
 SafeNumeric<uint32_t> finished;
 uint32_t tasks_used = 0;
- TightLocalVector<Task *> low_priority_native_tasks;
 };
 
 struct Task {
+ TaskID self = -1;
 Callable callable;
 void (*native_func)(void *) = nullptr;
 void (*native_group_func)(void *, uint32_t) = nullptr;
 void *native_func_userdata = nullptr;
 String description;
- Semaphore done_semaphore;
+ Semaphore done_semaphore; // For user threads awaiting completion.
 bool completed = false;
 Group *group = nullptr;
 SelfList<Task> task_elem;
- uint32_t waiting = 0;
+ uint32_t waiting_pool = 0;
+ uint32_t waiting_user = 0;
 bool low_priority = false;
 BaseTemplateUserdata *template_userdata = nullptr;
- Thread *low_priority_thread = nullptr;
 int pool_thread_index = -1;
 
 void free_template_userdata();
@@ -92,51 +95,65 @@ private:
 task_elem(this) {}
 };
 
- PagedAllocator<Task> task_allocator;
- PagedAllocator<Group> group_allocator;
- PagedAllocator<Thread> native_thread_allocator;
+ static const uint32_t TASKS_PAGE_SIZE = 1024;
+ static const uint32_t GROUPS_PAGE_SIZE = 256;
+
+ PagedAllocator<Task, false, TASKS_PAGE_SIZE> task_allocator;
+ PagedAllocator<Group, false, GROUPS_PAGE_SIZE> group_allocator;
 
 SelfList<Task>::List low_priority_task_queue;
 SelfList<Task>::List task_queue;
 
- Mutex task_mutex;
- Semaphore task_available_semaphore;
+ BinaryMutex task_mutex;
 
 struct ThreadData {
- uint32_t index;
+ uint32_t index = 0;
 Thread thread;
- Task *current_low_prio_task = nullptr;
 bool ready_for_scripting = false;
+ bool signaled = false;
+ Task *current_task = nullptr;
+ Task *awaited_task = nullptr; // Null if not awaiting the condition variable. Special value for idle-waiting.
+ ConditionVariable cond_var;
 };
 
 TightLocalVector<ThreadData> threads;
 bool exit_threads = false;
 
 HashMap<Thread::ID, int> thread_ids;
- HashMap<TaskID, Task *> tasks;
- HashMap<GroupID, Group *> groups;
+ HashMap<
+ TaskID,
+ Task *,
+ HashMapHasherDefault,
+ HashMapComparatorDefault<TaskID>,
+ PagedAllocator<HashMapElement<TaskID, Task *>, false, TASKS_PAGE_SIZE>>
+ tasks;
+ HashMap<
+ GroupID,
+ Group *,
+ HashMapHasherDefault,
+ HashMapComparatorDefault<GroupID>,
+ PagedAllocator<HashMapElement<GroupID, Group *>, false, GROUPS_PAGE_SIZE>>
+ groups;
 
- bool use_native_low_priority_threads = false;
 uint32_t max_low_priority_threads = 0;
 uint32_t low_priority_threads_used = 0;
- uint32_t low_priority_tasks_running = 0;
- uint32_t low_priority_tasks_awaiting_others = 0;
+ uint32_t notify_index = 0; // For rotating wakeups across threads; no attempt to balance load.
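The notify_index member drives which worker gets signaled next. A minimal sketch of that rotation (Worker, PoolState, and next_worker_to_signal are invented for illustration): wakeups are spread evenly across threads, with no attempt to pick the least-loaded one.

    #include <cstdint>
    #include <vector>

    struct Worker {
        bool signaled = false; // Set before notifying so a wakeup is never lost.
    };

    struct PoolState {
        std::vector<Worker> workers;
        uint32_t notify_index = 0;

        // Round-robin: repeated notifications don't always land on the same thread.
        Worker &next_worker_to_signal() {
            Worker &w = workers[notify_index];
            notify_index = (notify_index + 1) % static_cast<uint32_t>(workers.size());
            return w;
        }
    };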
uint64_t last_task = 1; static void _thread_function(void *p_user); - static void _native_low_priority_thread_function(void *p_user); - void _process_task_queue(); void _process_task(Task *task); - void _post_task(Task *p_task, bool p_high_priority); + void _post_tasks_and_unlock(Task **p_tasks, uint32_t p_count, bool p_high_priority); + void _notify_threads(const ThreadData *p_current_thread_data, uint32_t p_process_count, uint32_t p_promote_count); bool _try_promote_low_priority_task(); - void _prevent_low_prio_saturation_deadlock(); static WorkerThreadPool *singleton; + static thread_local CommandQueueMT *flushing_cmd_queue; + TaskID _add_task(const Callable &p_callable, void (*p_func)(void *), void *p_userdata, BaseTemplateUserdata *p_template_userdata, bool p_high_priority, const String &p_description); GroupID _add_group_task(const Callable &p_callable, void (*p_func)(void *, uint32_t), void *p_userdata, BaseTemplateUserdata *p_template_userdata, int p_elements, int p_tasks, bool p_high_priority, const String &p_description); @@ -197,7 +214,12 @@ public: _FORCE_INLINE_ int get_thread_count() const { return threads.size(); } static WorkerThreadPool *get_singleton() { return singleton; } - void init(int p_thread_count = -1, bool p_use_native_threads_low_priority = true, float p_low_priority_task_ratio = 0.3); + static int get_thread_index(); + + static void thread_enter_command_queue_mt_flush(CommandQueueMT *p_queue); + static void thread_exit_command_queue_mt_flush(); + + void init(int p_thread_count = -1, float p_low_priority_task_ratio = 0.3); void finish(); WorkerThreadPool(); ~WorkerThreadPool(); |
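For reference, the low-priority budget that init() now always computes (previously skipped when native low-priority threads were enabled) is easy to check by hand. A standalone restatement with one worked case (low_priority_budget is an invented helper mirroring the CLAMP expression above; assumes at least two pool threads):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    uint32_t low_priority_budget(uint32_t thread_count, float ratio) {
        uint32_t raw = static_cast<uint32_t>(thread_count * ratio); // Truncates.
        return std::clamp<uint32_t>(raw, 1, thread_count - 1);
    }

    int main() {
        // 8 threads, default ratio 0.3: 8 * 0.3 = 2.4 -> 2 -> CLAMP(2, 1, 7) = 2,
        // so at most 2 of the 8 workers run low-priority tasks at once.
        std::printf("%u\n", low_priority_budget(8, 0.3f)); // Prints 2.
        return 0;
    }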